Commit b9e876b2bcec19ad9413c7406a81140932a9ab818b77fb491c8f4ac74828cb4f

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- .gitattributes +8 -0
- requirements.txt +21 -0
- test.py +230 -0
- third-party/DPVO/build/lib.win-amd64-3.9/cuda_ba.cp39-win_amd64.pyd +0 -0
- third-party/DPVO/build/lib.win-amd64-3.9/cuda_corr.cp39-win_amd64.pyd +0 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/__init__.py +0 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/__init__.py +1 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/correlation.py +74 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/ba.py +182 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/blocks.py +118 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/config.py +27 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/__init__.py +1 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/augmentation.py +66 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/base.py +176 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/factory.py +26 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/frame_utils.py +164 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/rgbd_utils.py +188 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/tartan.py +110 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/dpvo.py +402 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/extractor.py +264 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/__init__.py +1 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/ba.py +8 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/__init__.py +2 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/broadcasting.py +31 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/gradcheck.py +592 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/group_ops.py +102 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/groups.py +322 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/run_tests.py +302 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/logger.py +58 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/net.py +270 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/plot_utils.py +52 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/projective_ops.py +121 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/stream.py +87 -0
- third-party/DPVO/build/lib.win-amd64-3.9/dpvo/utils.py +87 -0
- third-party/DPVO/build/lib.win-amd64-3.9/lietorch_backends.cp39-win_amd64.pyd +3 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_deps +0 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_log +6 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/build.ninja +29 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj +3 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj +3 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.exp +0 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.lib +0 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj +3 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj +3 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.exp +0 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.lib +0 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj +3 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.exp +0 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.lib +0 -0
- third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_cpu.obj +3 -0
    	
.gitattributes CHANGED

@@ -41,3 +41,11 @@ examples/test16.mov filter=lfs diff=lfs merge=lfs -text
 examples/test17.mov filter=lfs diff=lfs merge=lfs -text
 examples/test18.mov filter=lfs diff=lfs merge=lfs -text
 examples/test19.mov filter=lfs diff=lfs merge=lfs -text
+third-party/DPVO/build/lib.win-amd64-3.9/lietorch_backends.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
+third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj filter=lfs diff=lfs merge=lfs -text
+third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj filter=lfs diff=lfs merge=lfs -text
+third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj filter=lfs diff=lfs merge=lfs -text
+third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj filter=lfs diff=lfs merge=lfs -text
+third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj filter=lfs diff=lfs merge=lfs -text
+third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_cpu.obj filter=lfs diff=lfs merge=lfs -text
+third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_gpu.obj filter=lfs diff=lfs merge=lfs -text
    	
requirements.txt ADDED

chumpy @ git+https://github.com/mattloper/chumpy
numpy==1.22.3
yacs
joblib
scikit-image
opencv-python
imageio[ffmpeg]
matplotlib
tensorboard
smplx
progress
einops
mmcv==1.3.9
timm==0.4.9
munkres
xtcocotools>=1.8
loguru
setuptools==59.5.0
tqdm
ultralytics
gdown==4.6.0
    	
test.py ADDED

import os
import os.path as osp
from collections import defaultdict

import cv2
import torch
import joblib
import numpy as np
from loguru import logger
from progress.bar import Bar

from configs.config import get_cfg_defaults
from lib.data.datasets import CustomDataset
from lib.utils.imutils import avg_preds
from lib.utils.transforms import matrix_to_axis_angle
from lib.models import build_network, build_body_model
from lib.models.preproc.detector import DetectionModel
from lib.models.preproc.extractor import FeatureExtractor
from lib.models.smplify import TemporalSMPLify

try:
    from lib.models.preproc.slam import SLAMModel
    _run_global = True
except Exception:
    logger.info('DPVO is not properly installed. Only estimating in local coordinates!')
    _run_global = False


def run(cfg,
        video,
        output_pth,
        network,
        calib=None,
        run_global=True,
        save_pkl=False,
        visualize=False,
        run_smplify=False):

    cap = cv2.VideoCapture(video)
    assert cap.isOpened(), f'Failed to load video file {video}'
    fps = cap.get(cv2.CAP_PROP_FPS)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width, height = cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

    # Whether to estimate motion in global coordinates
    run_global = run_global and _run_global

    # Preprocess
    with torch.no_grad():
        if not (osp.exists(osp.join(output_pth, 'tracking_results.pth')) and
                osp.exists(osp.join(output_pth, 'slam_results.pth'))):

            detector = DetectionModel(cfg.DEVICE.lower())
            extractor = FeatureExtractor(cfg.DEVICE.lower(), cfg.FLIP_EVAL)

            if run_global:
                slam = SLAMModel(video, output_pth, width, height, calib)
            else:
                slam = None

            bar = Bar('Preprocess: 2D detection and SLAM', fill='#', max=length)
            while cap.isOpened():
                flag, img = cap.read()
                if not flag:
                    break

                # 2D detection and tracking
                detector.track(img, fps, length)

                # SLAM
                if slam is not None:
                    slam.track()

                bar.next()

            tracking_results = detector.process(fps)

            if slam is not None:
                slam_results = slam.process()
            else:
                slam_results = np.zeros((length, 7))
                slam_results[:, 3] = 1.0    # Unit quaternion

            # Extract image features
            # TODO: Merge this into the previous while loop with an online bbox smoothing.
            tracking_results = extractor.run(video, tracking_results)
            logger.info('Completed data preprocessing!')

            # Save the processed data
            joblib.dump(tracking_results, osp.join(output_pth, 'tracking_results.pth'))
            joblib.dump(slam_results, osp.join(output_pth, 'slam_results.pth'))
            logger.info(f'Saved processed data at {output_pth}')

        # If the processed data already exists, load it
        else:
            tracking_results = joblib.load(osp.join(output_pth, 'tracking_results.pth'))
            slam_results = joblib.load(osp.join(output_pth, 'slam_results.pth'))
            logger.info(f'Processed data already exists at {output_pth}. Loading it.')

    # Build dataset
    dataset = CustomDataset(cfg, tracking_results, slam_results, width, height, fps)

    # Run WHAM
    results = defaultdict(dict)

    n_subjs = len(dataset)
    for subj in range(n_subjs):

        with torch.no_grad():
            if cfg.FLIP_EVAL:
                # Forward pass with flipped input
                flipped_batch = dataset.load_data(subj, True)
                _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = flipped_batch
                flipped_pred = network(x, inits, features, mask=mask, init_root=init_root,
                                       cam_angvel=cam_angvel, return_y_up=True, **kwargs)

                # Forward pass with normal input
                batch = dataset.load_data(subj)
                _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = batch
                pred = network(x, inits, features, mask=mask, init_root=init_root,
                               cam_angvel=cam_angvel, return_y_up=True, **kwargs)

                # Merge the two predictions
                flipped_pose, flipped_shape = flipped_pred['pose'].squeeze(0), flipped_pred['betas'].squeeze(0)
                pose, shape = pred['pose'].squeeze(0), pred['betas'].squeeze(0)
                flipped_pose, pose = flipped_pose.reshape(-1, 24, 6), pose.reshape(-1, 24, 6)
                avg_pose, avg_shape = avg_preds(pose, shape, flipped_pose, flipped_shape)
                avg_pose = avg_pose.reshape(-1, 144)
                avg_contact = (flipped_pred['contact'][..., [2, 3, 0, 1]] + pred['contact']) / 2

                # Refine trajectory with the merged prediction
                network.pred_pose = avg_pose.view_as(network.pred_pose)
                network.pred_shape = avg_shape.view_as(network.pred_shape)
                network.pred_contact = avg_contact.view_as(network.pred_contact)
                output = network.forward_smpl(**kwargs)
                pred = network.refine_trajectory(output, cam_angvel, return_y_up=True)

            else:
                # Data
                batch = dataset.load_data(subj)
                _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = batch

                # Inference
                pred = network(x, inits, features, mask=mask, init_root=init_root,
                               cam_angvel=cam_angvel, return_y_up=True, **kwargs)

        if run_smplify:
            # `smpl` is the body model built in the __main__ block below
            smplify = TemporalSMPLify(smpl, img_w=width, img_h=height, device=cfg.DEVICE)
            input_keypoints = dataset.tracking_results[_id]['keypoints']
            pred = smplify.fit(pred, input_keypoints, **kwargs)

            with torch.no_grad():
                network.pred_pose = pred['pose']
                network.pred_shape = pred['betas']
                network.pred_cam = pred['cam']
                output = network.forward_smpl(**kwargs)
                pred = network.refine_trajectory(output, cam_angvel, return_y_up=True)

        # ========= Store results ========= #
        pred_body_pose = matrix_to_axis_angle(pred['poses_body']).cpu().numpy().reshape(-1, 69)
        pred_root = matrix_to_axis_angle(pred['poses_root_cam']).cpu().numpy().reshape(-1, 3)
        pred_root_world = matrix_to_axis_angle(pred['poses_root_world']).cpu().numpy().reshape(-1, 3)
        pred_pose = np.concatenate((pred_root, pred_body_pose), axis=-1)
        pred_pose_world = np.concatenate((pred_root_world, pred_body_pose), axis=-1)
        pred_trans = (pred['trans_cam'] - network.output.offset).cpu().numpy()

        results[_id]['pose'] = pred_pose
        results[_id]['trans'] = pred_trans
        results[_id]['pose_world'] = pred_pose_world
        results[_id]['trans_world'] = pred['trans_world'].cpu().squeeze(0).numpy()
        results[_id]['betas'] = pred['betas'].cpu().squeeze(0).numpy()
        results[_id]['verts'] = (pred['verts_cam'] + pred['trans_cam'].unsqueeze(1)).cpu().numpy()
        results[_id]['frame_ids'] = frame_id

    if save_pkl:
        joblib.dump(results, osp.join(output_pth, "wham_output.pkl"))

    # Visualize
    if visualize:
        from lib.vis.run_vis import run_vis_on_demo
        with torch.no_grad():
            run_vis_on_demo(cfg, video, results, output_pth, network.smpl, vis_global=run_global)


if __name__ == '__main__':

    VIDEO_PATH = "examples/test19.mov"
    OUTPUT_PATH = "output/demo"
    CALIB_PATH = None
    ESTIMATE_LOCAL_ONLY = False
    VISUALIZE = True
    SAVE_PKL = True
    RUN_SMPLIFY = False
    GENDER = 'male'

    cfg = get_cfg_defaults()
    cfg.merge_from_file('configs/yamls/demo.yaml')

    logger.info(f'GPU name -> {torch.cuda.get_device_name()}')
    logger.info(f'GPU feat -> {torch.cuda.get_device_properties("cuda")}')

    # ========= Load WHAM ========= #
    smpl_batch_size = cfg.TRAIN.BATCH_SIZE * cfg.DATASET.SEQLEN
    smpl = build_body_model(device=cfg.DEVICE, gender=GENDER, batch_size=smpl_batch_size)
    network = build_network(cfg, smpl)
    network.eval()

    # Output folder
    sequence = '.'.join(VIDEO_PATH.split('/')[-1].split('.')[:-1])
    output_pth = osp.join(OUTPUT_PATH, sequence)
    os.makedirs(output_pth, exist_ok=True)

    faces_np = network.smpl.get_faces()
    np.save(osp.join(output_pth, f'faces_{GENDER}.npy'), faces_np)

    run(cfg,
        VIDEO_PATH,
        output_pth,
        network,
        CALIB_PATH,
        run_global=not ESTIMATE_LOCAL_ONLY,
        save_pkl=SAVE_PKL,
        visualize=VISUALIZE,
        run_smplify=RUN_SMPLIFY)

    logger.info('Done!')
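For reference, a minimal sketch of consuming the pickle written by run(..., save_pkl=True). The key names mirror the results[_id][...] assignments above; the path and printed shapes are illustrative assumptions, not guaranteed by this diff:

import joblib

# Hypothetical path: OUTPUT_PATH/<sequence>/wham_output.pkl, as composed in __main__ above.
results = joblib.load('output/demo/test19/wham_output.pkl')

for subj_id, r in results.items():
    # Per the assignments in run(): 'pose' is (T, 72) axis-angle (3 root + 69 body),
    # 'trans' the per-frame camera-space translation, 'betas' the SMPL shape coefficients.
    print(subj_id, r['pose'].shape, r['trans'].shape, r['betas'].shape)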
    	
third-party/DPVO/build/lib.win-amd64-3.9/cuda_ba.cp39-win_amd64.pyd ADDED

Binary file (366 kB).
    	
third-party/DPVO/build/lib.win-amd64-3.9/cuda_corr.cp39-win_amd64.pyd ADDED

Binary file (421 kB).
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/__init__.py ADDED

(empty file)
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/__init__.py ADDED

from .correlation import corr, patchify
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/correlation.py ADDED

import torch
import cuda_corr


class CorrLayer(torch.autograd.Function):
    @staticmethod
    def forward(ctx, fmap1, fmap2, coords, ii, jj, radius, dropout):
        """ forward correlation """
        ctx.save_for_backward(fmap1, fmap2, coords, ii, jj)
        ctx.radius = radius
        ctx.dropout = dropout
        corr, = cuda_corr.forward(fmap1, fmap2, coords, ii, jj, radius)
        return corr

    @staticmethod
    def backward(ctx, grad):
        """ backward correlation """
        fmap1, fmap2, coords, ii, jj = ctx.saved_tensors

        if ctx.dropout < 1:
            perm = torch.rand(len(ii), device="cuda") < ctx.dropout
            coords = coords[:, perm]
            grad = grad[:, perm]
            ii = ii[perm]
            jj = jj[perm]

        fmap1_grad, fmap2_grad = \
            cuda_corr.backward(fmap1, fmap2, coords, ii, jj, grad, ctx.radius)

        return fmap1_grad, fmap2_grad, None, None, None, None, None


class PatchLayer(torch.autograd.Function):
    @staticmethod
    def forward(ctx, net, coords, radius):
        """ forward patchify """
        ctx.radius = radius
        ctx.save_for_backward(net, coords)

        patches, = cuda_corr.patchify_forward(net, coords, radius)
        return patches

    @staticmethod
    def backward(ctx, grad):
        """ backward patchify """
        net, coords = ctx.saved_tensors
        grad, = cuda_corr.patchify_backward(net, coords, grad, ctx.radius)
        return grad, None, None


def patchify(net, coords, radius, mode='bilinear'):
    """ extract patches """

    patches = PatchLayer.apply(net, coords, radius)

    if mode == 'bilinear':
        offset = (coords - coords.floor()).to(net.device)
        dx, dy = offset[:, :, None, None, None].unbind(dim=-1)

        d = 2 * radius + 1
        x00 = (1 - dy) * (1 - dx) * patches[..., :d, :d]
        x01 = (1 - dy) * (    dx) * patches[..., :d, 1:]
        x10 = (    dy) * (1 - dx) * patches[..., 1:, :d]
        x11 = (    dy) * (    dx) * patches[..., 1:, 1:]

        return x00 + x01 + x10 + x11

    return patches


def corr(fmap1, fmap2, coords, ii, jj, radius=1, dropout=1):
    return CorrLayer.apply(fmap1, fmap2, coords, ii, jj, radius, dropout)
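The mode='bilinear' branch of patchify blends the four integer-aligned d x d sub-windows of each (d+1) x (d+1) patch, weighted by the fractional part of the sampling coordinate. A minimal NumPy sketch of the same arithmetic on one 2D patch (toy shapes, not the CUDA extension's layout):

import numpy as np

radius = 1
d = 2 * radius + 1                    # sampled window is (d+1) x (d+1) = 4 x 4
patch = np.arange(16, dtype=np.float64).reshape(4, 4)
dx, dy = 0.25, 0.75                   # fractional offset of the sampling point

# Same four-corner blend as in patchify(): each term weights one d x d sub-window,
# and the four weights sum to 1.
out = ((1 - dy) * (1 - dx) * patch[:d, :d] +
       (1 - dy) * dx       * patch[:d, 1:] +
       dy       * (1 - dx) * patch[1:, :d] +
       dy       * dx       * patch[1:, 1:])

print(out.shape)  # (3, 3): a d x d patch bilinearly resampled at the fractional offset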
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/ba.py ADDED

import torch
from torch_scatter import scatter_sum

from . import fastba
from . import lietorch
from .lietorch import SE3

from .utils import Timer

from . import projective_ops as pops


class CholeskySolver(torch.autograd.Function):
    @staticmethod
    def forward(ctx, H, b):
        # don't crash training if cholesky decomp fails
        U, info = torch.linalg.cholesky_ex(H)

        if torch.any(info):
            ctx.failed = True
            return torch.zeros_like(b)

        xs = torch.cholesky_solve(b, U)
        ctx.save_for_backward(U, xs)
        ctx.failed = False

        return xs

    @staticmethod
    def backward(ctx, grad_x):
        if ctx.failed:
            return None, None

        U, xs = ctx.saved_tensors
        dz = torch.cholesky_solve(grad_x, U)
        dH = -torch.matmul(xs, dz.transpose(-1, -2))

        return dH, dz


# utility functions for scattering ops
def safe_scatter_add_mat(A, ii, jj, n, m):
    v = (ii >= 0) & (jj >= 0) & (ii < n) & (jj < m)
    return scatter_sum(A[:, v], ii[v] * m + jj[v], dim=1, dim_size=n * m)


def safe_scatter_add_vec(b, ii, n):
    v = (ii >= 0) & (ii < n)
    return scatter_sum(b[:, v], ii[v], dim=1, dim_size=n)


# apply retraction operator to inv-depth maps
def disp_retr(disps, dz, ii):
    ii = ii.to(device=dz.device)
    return disps + scatter_sum(dz, ii, dim=1, dim_size=disps.shape[1])


# apply retraction operator to poses
def pose_retr(poses, dx, ii):
    ii = ii.to(device=dx.device)
    return poses.retr(scatter_sum(dx, ii, dim=1, dim_size=poses.shape[1]))


def block_matmul(A, B):
    """ block matrix multiply """
    b, n1, m1, p1, q1 = A.shape
    b, n2, m2, p2, q2 = B.shape
    A = A.permute(0, 1, 3, 2, 4).reshape(b, n1 * p1, m1 * q1)
    B = B.permute(0, 1, 3, 2, 4).reshape(b, n2 * p2, m2 * q2)
    return torch.matmul(A, B).reshape(b, n1, p1, m2, q2).permute(0, 1, 3, 2, 4)


def block_solve(A, B, ep=1.0, lm=1e-4):
    """ block matrix solve """
    b, n1, m1, p1, q1 = A.shape
    b, n2, m2, p2, q2 = B.shape
    A = A.permute(0, 1, 3, 2, 4).reshape(b, n1 * p1, m1 * q1)
    B = B.permute(0, 1, 3, 2, 4).reshape(b, n2 * p2, m2 * q2)

    A = A + (ep + lm * A) * torch.eye(n1 * p1, device=A.device)

    X = CholeskySolver.apply(A, B)
    return X.reshape(b, n1, p1, m2, q2).permute(0, 1, 3, 2, 4)


def block_show(A):
    import matplotlib.pyplot as plt
    b, n1, m1, p1, q1 = A.shape
    A = A.permute(0, 1, 3, 2, 4).reshape(b, n1 * p1, m1 * q1)
    plt.imshow(A[0].detach().cpu().numpy())
    plt.show()


def BA(poses, patches, intrinsics, targets, weights, lmbda, ii, jj, kk, bounds,
       ep=100.0, PRINT=False, fixedp=1, structure_only=False):
    """ bundle adjustment """

    b = 1
    n = max(ii.max().item(), jj.max().item()) + 1

    coords, v, (Ji, Jj, Jz) = \
        pops.transform(poses, patches, intrinsics, ii, jj, kk, jacobian=True)

    p = coords.shape[3]
    r = targets - coords[..., p // 2, p // 2, :]

    v *= (r.norm(dim=-1) < 250).float()

    in_bounds = \
        (coords[..., p // 2, p // 2, 0] > bounds[0]) & \
        (coords[..., p // 2, p // 2, 1] > bounds[1]) & \
        (coords[..., p // 2, p // 2, 0] < bounds[2]) & \
        (coords[..., p // 2, p // 2, 1] < bounds[3])

    v *= in_bounds.float()

    if PRINT:
        print((r * v[..., None]).norm(dim=-1).mean().item())

    r = (v[..., None] * r).unsqueeze(dim=-1)
    weights = (v[..., None] * weights).unsqueeze(dim=-1)

    wJiT = (weights * Ji).transpose(2, 3)
    wJjT = (weights * Jj).transpose(2, 3)
    wJzT = (weights * Jz).transpose(2, 3)

    Bii = torch.matmul(wJiT, Ji)
    Bij = torch.matmul(wJiT, Jj)
    Bji = torch.matmul(wJjT, Ji)
    Bjj = torch.matmul(wJjT, Jj)

    Eik = torch.matmul(wJiT, Jz)
    Ejk = torch.matmul(wJjT, Jz)

    vi = torch.matmul(wJiT, r)
    vj = torch.matmul(wJjT, r)

    # fix first pose
    ii = ii.clone()
    jj = jj.clone()

    n = n - fixedp
    ii = ii - fixedp
    jj = jj - fixedp

    kx, kk = torch.unique(kk, return_inverse=True, sorted=True)
    m = len(kx)

    B = safe_scatter_add_mat(Bii, ii, ii, n, n).view(b, n, n, 6, 6) + \
        safe_scatter_add_mat(Bij, ii, jj, n, n).view(b, n, n, 6, 6) + \
        safe_scatter_add_mat(Bji, jj, ii, n, n).view(b, n, n, 6, 6) + \
        safe_scatter_add_mat(Bjj, jj, jj, n, n).view(b, n, n, 6, 6)

    E = safe_scatter_add_mat(Eik, ii, kk, n, m).view(b, n, m, 6, 1) + \
        safe_scatter_add_mat(Ejk, jj, kk, n, m).view(b, n, m, 6, 1)

    C = safe_scatter_add_vec(torch.matmul(wJzT, Jz), kk, m)

    v = safe_scatter_add_vec(vi, ii, n).view(b, n, 1, 6, 1) + \
        safe_scatter_add_vec(vj, jj, n).view(b, n, 1, 6, 1)

    w = safe_scatter_add_vec(torch.matmul(wJzT, r), kk, m)

    if isinstance(lmbda, torch.Tensor):
        lmbda = lmbda.reshape(*C.shape)

    Q = 1.0 / (C + lmbda)

    ### solve w/ schur complement ###
    EQ = E * Q[:, None]

    if structure_only or n == 0:
        dZ = (Q * w).view(b, -1, 1, 1)

    else:
        S = B - block_matmul(EQ, E.permute(0, 2, 1, 4, 3))
        y = v - block_matmul(EQ, w.unsqueeze(dim=2))
        dX = block_solve(S, y, ep=ep, lm=1e-4)

        dZ = Q * (w - block_matmul(E.permute(0, 2, 1, 4, 3), dX).squeeze(dim=-1))
        dX = dX.view(b, -1, 6)
        dZ = dZ.view(b, -1, 1, 1)

    x, y, disps = patches.unbind(dim=2)
    disps = disp_retr(disps, dZ, kx).clamp(min=1e-3, max=10.0)
    patches = torch.stack([x, y, disps], dim=2)

    if not structure_only and n > 0:
        poses = pose_retr(poses, dX, fixedp + torch.arange(n))

    return poses, patches
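The tail of BA is the standard Schur-complement elimination for bundle adjustment: given normal equations [[B, E], [E^T, C]] [dX; dZ] = [v; w] with a (block-)diagonal structure block C, the pose update dX is solved from the reduced system S = B - E Q E^T with Q = (C + lmbda)^-1, and dZ is recovered by back-substitution, exactly as in the else branch above. A dense NumPy sketch on a toy system (random matrices, illustrative only, not DPVO's data layout):

import numpy as np

rng = np.random.default_rng(0)
n_pose, n_depth = 6, 4                       # toy sizes: pose dofs and depth dofs

# Symmetric toy system  [[B, E], [E.T, C]] [dX; dZ] = [v; w]
B = rng.standard_normal((n_pose, n_pose))
B = B @ B.T + 10.0 * np.eye(n_pose)          # make the pose block well conditioned
E = rng.standard_normal((n_pose, n_depth))
C = np.diag(rng.uniform(1.0, 2.0, n_depth))  # diagonal structure block
v = rng.standard_normal(n_pose)
w = rng.standard_normal(n_depth)

# Schur complement: eliminate dZ, solve the reduced pose system, back-substitute.
Q = np.diag(1.0 / np.diag(C))                # cheap inverse of the diagonal block (cf. Q above)
S = B - E @ Q @ E.T                          # cf. S = B - block_matmul(EQ, E^T)
dX = np.linalg.solve(S, v - E @ Q @ w)
dZ = Q @ (w - E.T @ dX)

# Agrees with solving the full system directly.
H = np.block([[B, E], [E.T, C]])
ref = np.linalg.solve(H, np.concatenate([v, w]))
assert np.allclose(np.concatenate([dX, dZ]), ref)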
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/blocks.py
    ADDED
    
    | @@ -0,0 +1,118 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            import torch
         | 
| 2 | 
            +
            import torch.nn as nn
         | 
| 3 | 
            +
            import torch.nn.functional as F
         | 
| 4 | 
            +
             | 
| 5 | 
            +
            import torch_scatter
         | 
| 6 | 
            +
             | 
| 7 | 
            +
            class LayerNorm1D(nn.Module):
         | 
| 8 | 
            +
                def __init__(self, dim):
         | 
| 9 | 
            +
                    super(LayerNorm1D, self).__init__()
         | 
| 10 | 
            +
                    self.norm = nn.LayerNorm(dim, eps=1e-4)
         | 
| 11 | 
            +
             | 
| 12 | 
            +
                def forward(self, x):
         | 
| 13 | 
            +
                    return self.norm(x.transpose(1,2)).transpose(1,2)
         | 
| 14 | 
            +
             | 
| 15 | 
            +
            class GatedResidual(nn.Module):
         | 
| 16 | 
            +
                def __init__(self, dim):
         | 
| 17 | 
            +
                    super().__init__()
         | 
| 18 | 
            +
             | 
| 19 | 
            +
                    self.gate = nn.Sequential(
         | 
| 20 | 
            +
                        nn.Linear(dim, dim),
         | 
| 21 | 
            +
                        nn.Sigmoid())
         | 
| 22 | 
            +
             | 
| 23 | 
            +
                    self.res = nn.Sequential(
         | 
| 24 | 
            +
                        nn.Linear(dim, dim),
         | 
| 25 | 
            +
                        nn.ReLU(inplace=True),
         | 
| 26 | 
            +
                        nn.Linear(dim, dim))
         | 
| 27 | 
            +
             | 
| 28 | 
            +
                def forward(self, x):
         | 
| 29 | 
            +
                    return x + self.gate(x) * self.res(x)
         | 
| 30 | 
            +
             | 
| 31 | 
            +
            class SoftAgg(nn.Module):
         | 
| 32 | 
            +
                def __init__(self, dim=512, expand=True):
         | 
| 33 | 
            +
        super(SoftAgg, self).__init__()
        self.dim = dim
        self.expand = expand
        self.f = nn.Linear(self.dim, self.dim)
        self.g = nn.Linear(self.dim, self.dim)
        self.h = nn.Linear(self.dim, self.dim)

    def forward(self, x, ix):
        _, jx = torch.unique(ix, return_inverse=True)
        w = torch_scatter.scatter_softmax(self.g(x), jx, dim=1)
        y = torch_scatter.scatter_sum(self.f(x) * w, jx, dim=1)

        if self.expand:
            return self.h(y)[:,jx]

        return self.h(y)

class SoftAggBasic(nn.Module):
    def __init__(self, dim=512, expand=True):
        super(SoftAggBasic, self).__init__()
        self.dim = dim
        self.expand = expand
        self.f = nn.Linear(self.dim, self.dim)
        self.g = nn.Linear(self.dim,        1)
        self.h = nn.Linear(self.dim, self.dim)

    def forward(self, x, ix):
        _, jx = torch.unique(ix, return_inverse=True)
        w = torch_scatter.scatter_softmax(self.g(x), jx, dim=1)
        y = torch_scatter.scatter_sum(self.f(x) * w, jx, dim=1)

        if self.expand:
            return self.h(y)[:,jx]

        return self.h(y)


### Gradient Clipping and Zeroing Operations ###

GRAD_CLIP = 0.1

class GradClip(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad_x):
        grad_x = torch.where(torch.isnan(grad_x), torch.zeros_like(grad_x), grad_x)
        return grad_x.clamp(min=-0.01, max=0.01)

class GradientClip(nn.Module):
    def __init__(self):
        super(GradientClip, self).__init__()

    def forward(self, x):
        return GradClip.apply(x)

class GradZero(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad_x):
        grad_x = torch.where(torch.isnan(grad_x), torch.zeros_like(grad_x), grad_x)
        grad_x = torch.where(torch.abs(grad_x) > GRAD_CLIP, torch.zeros_like(grad_x), grad_x)
        return grad_x

class GradientZero(nn.Module):
    def __init__(self):
        super(GradientZero, self).__init__()

    def forward(self, x):
        return GradZero.apply(x)


class GradMag(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad_x):
        print(grad_x.abs().mean())
        return grad_x
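
A minimal usage sketch for the soft aggregation layer above (not part of the diff): SoftAgg pools per-edge features over a shared group index with a scatter softmax, then broadcasts the pooled vector back to every member of its group when expand=True. The shapes and dim value below are illustrative assumptions; the call pattern mirrors the module's own forward.

# Illustrative sketch (assumed shapes; not from the diff)
import torch
import torch_scatter  # required by SoftAgg
from dpvo.blocks import SoftAgg

agg = SoftAgg(dim=384)
x = torch.randn(1, 6, 384)              # (batch, edges, dim) features
ix = torch.tensor([0, 0, 1, 1, 2, 2])   # group index of each edge
y = agg(x, ix)                          # expand=True: broadcast back per edge
print(y.shape)                          # torch.Size([1, 6, 384])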
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/config.py
    ADDED
    
@@ -0,0 +1,27 @@
from yacs.config import CfgNode as CN

_C = CN()

# max number of keyframes
_C.BUFFER_SIZE = 2048

# bias patch selection towards high gradient regions?
_C.GRADIENT_BIAS = True

# VO config (increase for better accuracy)
_C.PATCHES_PER_FRAME = 80
_C.REMOVAL_WINDOW = 20
_C.OPTIMIZATION_WINDOW = 12
_C.PATCH_LIFETIME = 12

# threshold for keyframe removal
_C.KEYFRAME_INDEX = 4
_C.KEYFRAME_THRESH = 12.5

# camera motion model
_C.MOTION_MODEL = 'DAMPED_LINEAR'
_C.MOTION_DAMPING = 0.5

_C.MIXED_PRECISION = True

cfg = _C
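
A minimal sketch of how a caller might override these yacs defaults (the override values and the yaml path are assumptions, not part of the diff):

from dpvo.config import cfg

cfg.merge_from_list(['PATCHES_PER_FRAME', 96, 'BUFFER_SIZE', 4096])
# cfg.merge_from_file('config/custom.yaml')  # hypothetical override file
print(cfg.PATCHES_PER_FRAME)                 # 96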
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/__init__.py
    ADDED
    
@@ -0,0 +1 @@

    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/augmentation.py
    ADDED
    
@@ -0,0 +1,66 @@
import torch
import torchvision.transforms as transforms
import numpy as np
import torch.nn.functional as F


class RGBDAugmentor:
    """ perform augmentation on RGB-D video """

    def __init__(self, crop_size):
        self.crop_size = crop_size
        self.augcolor = transforms.Compose([
            transforms.ToPILImage(),
            transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2/3.14),
            transforms.RandomGrayscale(p=0.1),
            transforms.RandomInvert(p=0.1),
            transforms.ToTensor()])

        self.max_scale = 0.5

    def spatial_transform(self, images, depths, poses, intrinsics):
        """ cropping and resizing """
        ht, wd = images.shape[2:]

        max_scale = self.max_scale
        min_scale = np.log2(np.maximum(
            (self.crop_size[0] + 1) / float(ht),
            (self.crop_size[1] + 1) / float(wd)))

        scale = 1
        if np.random.rand() < 0.8:
            scale = 2 ** np.random.uniform(0.0, max_scale)

        intrinsics = scale * intrinsics

        ht1 = int(scale * ht)
        wd1 = int(scale * wd)

        depths = depths.unsqueeze(dim=1)

        images = F.interpolate(images, (ht1, wd1), mode='bicubic', align_corners=False)
        depths = F.interpolate(depths, (ht1, wd1), recompute_scale_factor=False)

        # always perform center crop (TODO: try non-center crops)
        y0 = (images.shape[2] - self.crop_size[0]) // 2
        x0 = (images.shape[3] - self.crop_size[1]) // 2

        intrinsics = intrinsics - torch.tensor([0.0, 0.0, x0, y0])
        images = images[:, :, y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]
        depths = depths[:, :, y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]]

        depths = depths.squeeze(dim=1)
        return images, poses, depths, intrinsics

    def color_transform(self, images):
        """ color jittering """
        num, ch, ht, wd = images.shape
        images = images.permute(1, 2, 3, 0).reshape(ch, ht, wd*num)
        images = 255 * self.augcolor(images[[2,1,0]] / 255.0)
        return images[[2,1,0]].reshape(ch, ht, wd, num).permute(3,0,1,2).contiguous()

    def __call__(self, images, poses, depths, intrinsics):
        if np.random.rand() < 0.5:
            images = self.color_transform(images)

        return self.spatial_transform(images, depths, poses, intrinsics)
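
A minimal sketch of the augmentor on dummy data (shapes inferred from the code above; all values are illustrative). Note the argument order: __call__ takes (images, poses, depths, intrinsics) and returns them in that same order.

import torch
from dpvo.data_readers.augmentation import RGBDAugmentor

aug = RGBDAugmentor(crop_size=[480, 640])
images = 255 * torch.rand(2, 3, 480, 640)      # (N, 3, H, W), 0..255
depths = torch.rand(2, 480, 640) + 0.5         # (N, H, W), positive
poses = torch.zeros(2, 7); poses[:, 6] = 1.0   # (t, q) with unit quaternion
intrinsics = torch.tensor([[320., 320., 320., 240.]]).repeat(2, 1)  # fx fy cx cy
images, poses, depths, intrinsics = aug(images, poses, depths, intrinsics)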
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/base.py
    ADDED
    
@@ -0,0 +1,176 @@
import numpy as np
import torch
import torch.utils.data as data
import torch.nn.functional as F

import csv
import os
import cv2
import math
import random
import json
import pickle
import os.path as osp

from .augmentation import RGBDAugmentor
from .rgbd_utils import *

class RGBDDataset(data.Dataset):
    def __init__(self, name, datapath, n_frames=4, crop_size=[480,640], fmin=10.0, fmax=75.0, aug=True, sample=True):
        """ Base class for RGBD dataset """
        self.aug = None
        self.root = datapath
        self.name = name

        self.aug = aug
        self.sample = sample

        self.n_frames = n_frames
        self.fmin = fmin # exclude very easy examples
        self.fmax = fmax # exclude very hard examples

        if self.aug:
            self.aug = RGBDAugmentor(crop_size=crop_size)

        # building dataset is expensive, cache so only needs to be performed once
        cur_path = osp.dirname(osp.abspath(__file__))
        if not os.path.isdir(osp.join(cur_path, 'cache')):
            os.mkdir(osp.join(cur_path, 'cache'))

        self.scene_info = \
            pickle.load(open('datasets/TartanAir.pickle', 'rb'))[0]

        self._build_dataset_index()

    def _build_dataset_index(self):
        self.dataset_index = []
        for scene in self.scene_info:
            if not self.__class__.is_test_scene(scene):
                graph = self.scene_info[scene]['graph']
                for i in graph:
                    if i < len(graph) - 65:
                        self.dataset_index.append((scene, i))
            else:
                print("Reserving {} for validation".format(scene))

    @staticmethod
    def image_read(image_file):
        return cv2.imread(image_file)

    @staticmethod
    def depth_read(depth_file):
        return np.load(depth_file)

    def build_frame_graph(self, poses, depths, intrinsics, f=16, max_flow=256):
        """ compute optical flow distance between all pairs of frames """
        def read_disp(fn):
            depth = self.__class__.depth_read(fn)[f//2::f, f//2::f]
            depth[depth < 0.01] = np.mean(depth)
            return 1.0 / depth

        poses = np.array(poses)
        intrinsics = np.array(intrinsics) / f

        disps = np.stack(list(map(read_disp, depths)), 0)
        d = f * compute_distance_matrix_flow(poses, disps, intrinsics)

        graph = {}
        for i in range(d.shape[0]):
            j, = np.where(d[i] < max_flow)
            graph[i] = (j, d[i,j])

        return graph

    def __getitem__(self, index):
        """ return training video """

        index = index % len(self.dataset_index)
        scene_id, ix = self.dataset_index[index]

        frame_graph = self.scene_info[scene_id]['graph']
        images_list = self.scene_info[scene_id]['images']
        depths_list = self.scene_info[scene_id]['depths']
        poses_list = self.scene_info[scene_id]['poses']
        intrinsics_list = self.scene_info[scene_id]['intrinsics']

        # stride = np.random.choice([1,2,3])

        d = np.random.uniform(self.fmin, self.fmax)
        s = 1

        inds = [ ix ]

        while len(inds) < self.n_frames:
            # get other frames within flow threshold

            if self.sample:
                k = (frame_graph[ix][1] > self.fmin) & (frame_graph[ix][1] < self.fmax)
                frames = frame_graph[ix][0][k]

                # prefer frames forward in time
                if np.count_nonzero(frames[frames > ix]):
                    ix = np.random.choice(frames[frames > ix])

                elif ix + 1 < len(images_list):
                    ix = ix + 1

                elif np.count_nonzero(frames):
                    ix = np.random.choice(frames)

            else:
                i = frame_graph[ix][0].copy()
                g = frame_graph[ix][1].copy()

                g[g > d] = -1
                if s > 0:
                    g[i <= ix] = -1
                else:
                    g[i >= ix] = -1

                if len(g) > 0 and np.max(g) > 0:
                    ix = i[np.argmax(g)]
                else:
                    if ix + s >= len(images_list) or ix + s < 0:
                        s *= -1

                    ix = ix + s

            inds += [ ix ]

        images, depths, poses, intrinsics = [], [], [], []
        for i in inds:
            images.append(self.__class__.image_read(images_list[i]))
            depths.append(self.__class__.depth_read(depths_list[i]))
            poses.append(poses_list[i])
            intrinsics.append(intrinsics_list[i])

        images = np.stack(images).astype(np.float32)
        depths = np.stack(depths).astype(np.float32)
        poses = np.stack(poses).astype(np.float32)
        intrinsics = np.stack(intrinsics).astype(np.float32)

        images = torch.from_numpy(images).float()
        images = images.permute(0, 3, 1, 2)

        disps = torch.from_numpy(1.0 / depths)
        poses = torch.from_numpy(poses)
        intrinsics = torch.from_numpy(intrinsics)

        if self.aug:
            images, poses, disps, intrinsics = \
                self.aug(images, poses, disps, intrinsics)

        # normalize depth
        s = .7 * torch.quantile(disps, .98)
        disps = disps / s
        poses[...,:3] *= s

        return images, poses, disps, intrinsics

    def __len__(self):
        return len(self.dataset_index)

    def __imul__(self, x):
        self.dataset_index *= x
        return self
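
The __imul__ hook lets a training script lengthen an epoch by repeating the (scene, frame) index list in place. A sketch, assuming a concrete subclass such as TartanAir and the cached datasets/TartanAir.pickle are available; the constructor arguments shown are assumptions:

from dpvo.data_readers.tartan import TartanAir

db = TartanAir('tartan', 'datasets/TartanAir', n_frames=4)  # assumed signature
n = len(db)
db *= 4                   # repeats the index list; same frames, 4x samples per epoch
assert len(db) == 4 * n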
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/factory.py
    ADDED
    
@@ -0,0 +1,26 @@
import pickle
import os
import os.path as osp

# RGBD-Dataset
from .tartan import TartanAir

def dataset_factory(dataset_list, **kwargs):
    """ create a combined dataset """

    from torch.utils.data import ConcatDataset

    dataset_map = {
        'tartan': (TartanAir, ),
    }

    db_list = []
    for key in dataset_list:
        # cache datasets for faster future loading
        db = dataset_map[key][0](**kwargs)

        print("Dataset {} has {} images".format(key, len(db)))
        db_list.append(db)

    return ConcatDataset(db_list)
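
A sketch of wiring the factory into a PyTorch DataLoader (keyword arguments are forwarded to the dataset constructor; the path and threshold values here are assumptions):

from torch.utils.data import DataLoader
from dpvo.data_readers.factory import dataset_factory

db = dataset_factory(['tartan'], datapath='datasets/TartanAir',
                     n_frames=4, fmin=16.0, fmax=36.0)
loader = DataLoader(db, batch_size=2, shuffle=True, num_workers=4)
images, poses, disps, intrinsics = next(iter(loader))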
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/frame_utils.py
    ADDED
    
@@ -0,0 +1,164 @@
import numpy as np
from PIL import Image
from os.path import *
import re
import cv2
cv2.setNumThreads(0)

from scipy.spatial.transform import Rotation  # used by cam_read below; this import was missing from the hunk

TAG_CHAR = np.array([202021.25], np.float32)

def readFlowKITTI(filename):
    flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR)
    flow = flow[:,:,::-1].astype(np.float32)
    flow, valid = flow[:, :, :2], flow[:, :, 2]
    flow = (flow - 2**15) / 64.0
    return flow, valid

def readFlow(fn):
    """ Read .flo file in Middlebury format"""
    # Code adapted from:
    # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy

    # WARNING: this will work on little-endian architectures (eg Intel x86) only!
    # print 'fn = %s'%(fn)
    with open(fn, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if 202021.25 != magic:
            print('Magic number incorrect. Invalid .flo file')
            return None
        else:
            w = np.fromfile(f, np.int32, count=1)
            h = np.fromfile(f, np.int32, count=1)
            # print 'Reading %d x %d flo file\n' % (w, h)
            data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
            # Reshape data into 3D array (columns, rows, bands)
            # The reshape here is for visualization, the original code is (w,h,2)
            return np.resize(data, (int(h), int(w), 2))

def readPFM(file):
    file = open(file, 'rb')

    color = None
    width = None
    height = None
    scale = None
    endian = None

    header = file.readline().rstrip()
    if header == b'PF':
        color = True
    elif header == b'Pf':
        color = False
    else:
        raise Exception('Not a PFM file.')

    try:
        dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline())
    except:
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline())

    if dim_match:
        width, height = map(int, dim_match.groups())
    else:
        raise Exception('Malformed PFM header.')

    scale = float(file.readline().rstrip())
    if scale < 0: # little-endian
        endian = '<'
        scale = -scale
    else:
        endian = '>' # big-endian

    data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)

    data = np.reshape(data, shape)
    data = np.flipud(data)
    return data


def writeFlow(filename,uv,v=None):
    """ Write optical flow to file.

    If v is None, uv is assumed to contain both u and v channels,
    stacked in depth.
    Original code by Deqing Sun, adapted from Daniel Scharstein.
    """
    nBands = 2

    if v is None:
        assert(uv.ndim == 3)
        assert(uv.shape[2] == 2)
        u = uv[:,:,0]
        v = uv[:,:,1]
    else:
        u = uv

    assert(u.shape == v.shape)
    height,width = u.shape
    f = open(filename,'wb')
    # write the header
    f.write(TAG_CHAR)
    np.array(width).astype(np.int32).tofile(f)
    np.array(height).astype(np.int32).tofile(f)
    # arrange into matrix form
    tmp = np.zeros((height, width*nBands))
    tmp[:,np.arange(width)*2] = u
    tmp[:,np.arange(width)*2 + 1] = v
    tmp.astype(np.float32).tofile(f)
    f.close()


def readDPT(filename):
    """ Read depth data from file, return as numpy array. """
    f = open(filename,'rb')
    check = np.fromfile(f,dtype=np.float32,count=1)[0]
    TAG_FLOAT = 202021.25
    TAG_CHAR = 'PIEH'
    assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check)
    width = np.fromfile(f,dtype=np.int32,count=1)[0]
    height = np.fromfile(f,dtype=np.int32,count=1)[0]
    size = width*height
    assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height)
    depth = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width))
    return depth

def cam_read(filename):
    """ Read camera data, return (M,N) tuple.
    M is the intrinsic matrix, N is the extrinsic matrix, so that
    x = M*N*X,
    with x being a point in homogeneous image pixel coordinates, X being a
    point in homogeneous world coordinates."""
    f = open(filename,'rb')
    check = np.fromfile(f,dtype=np.float32,count=1)[0]
    M = np.fromfile(f,dtype='float64',count=9).reshape((3,3))
    N = np.fromfile(f,dtype='float64',count=12).reshape((3,4))

    E = np.eye(4)
    E[0:3,:] = N

    fx, fy, cx, cy = M[0,0], M[1,1], M[0,2], M[1,2]
    kvec = np.array([fx, fy, cx, cy])

    q = Rotation.from_matrix(E[:3,:3]).as_quat()
    pvec = np.concatenate([E[:3,3], q], 0)

    return pvec, kvec


def read_gen(file_name, pil=False):
    ext = splitext(file_name)[-1]
    if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
        return Image.open(file_name)
    elif ext == '.bin' or ext == '.raw':
        return np.load(file_name)
    elif ext == '.flo':
        return readFlow(file_name).astype(np.float32)
    elif ext == '.pfm':
        return readPFM(file_name).astype(np.float32)
    elif ext == '.dpt':
        return readDPT(file_name).astype(np.float32)
    elif ext == '.cam':
        return cam_read(file_name)
    return []
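
A round-trip sketch for the Middlebury .flo reader/writer above (the scratch path is an assumption): writeFlow interleaves u and v row by row, and readFlow reshapes the same layout back to (H, W, 2).

import numpy as np
from dpvo.data_readers.frame_utils import writeFlow, readFlow

uv = np.random.randn(48, 64, 2).astype(np.float32)
writeFlow('/tmp/test.flo', uv)                 # header + interleaved u,v
assert np.allclose(readFlow('/tmp/test.flo'), uv)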
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/rgbd_utils.py
    ADDED
    
    | @@ -0,0 +1,188 @@ | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | |
|  | 
|  | |
| 1 | 
            +
            import numpy as np
         | 
| 2 | 
            +
            import os.path as osp
         | 
| 3 | 
            +
             | 
| 4 | 
            +
            import torch
         | 
| 5 | 
            +
            from ..lietorch import SE3
         | 
| 6 | 
            +
             | 
| 7 | 
            +
            from scipy.spatial.transform import Rotation
         | 
| 8 | 
            +
             | 
| 9 | 
            +
            def parse_list(filepath, skiprows=0):
         | 
| 10 | 
            +
                """ read list data """
         | 
| 11 | 
            +
                data = np.loadtxt(filepath, delimiter=' ', dtype=np.unicode_, skiprows=skiprows)
         | 
| 12 | 
            +
                return data
         | 
| 13 | 
            +
             | 
| 14 | 
            +
            def associate_frames(tstamp_image, tstamp_depth, tstamp_pose, max_dt=1.0):
         | 
| 15 | 
            +
                """ pair images, depths, and poses """
         | 
| 16 | 
            +
                associations = []
         | 
| 17 | 
            +
                for i, t in enumerate(tstamp_image):
         | 
| 18 | 
            +
                    if tstamp_pose is None:
         | 
| 19 | 
            +
                        j = np.argmin(np.abs(tstamp_depth - t))
         | 
| 20 | 
            +
                        if (np.abs(tstamp_depth[j] - t) < max_dt):
         | 
| 21 | 
            +
                            associations.append((i, j))
         | 
| 22 | 
            +
             | 
| 23 | 
            +
                    else:
         | 
| 24 | 
            +
                        j = np.argmin(np.abs(tstamp_depth - t))
         | 
| 25 | 
            +
                        k = np.argmin(np.abs(tstamp_pose - t))
         | 
| 26 | 
            +
                    
         | 
| 27 | 
            +
                        if (np.abs(tstamp_depth[j] - t) < max_dt) and \
         | 
| 28 | 
            +
                                (np.abs(tstamp_pose[k] - t) < max_dt):
         | 
| 29 | 
            +
                            associations.append((i, j, k))
         | 
| 30 | 
            +
                        
         | 
| 31 | 
            +
                return associations
         | 
| 32 | 
            +
             | 
| 33 | 
            +
            def loadtum(datapath, frame_rate=-1):
         | 
| 34 | 
            +
                """ read video data in tum-rgbd format """
         | 
| 35 | 
            +
                if osp.isfile(osp.join(datapath, 'groundtruth.txt')):
         | 
| 36 | 
            +
                    pose_list = osp.join(datapath, 'groundtruth.txt')
         | 
| 37 | 
            +
                
         | 
| 38 | 
            +
                elif osp.isfile(osp.join(datapath, 'pose.txt')):
         | 
| 39 | 
            +
                    pose_list = osp.join(datapath, 'pose.txt')
         | 
| 40 | 
            +
             | 
| 41 | 
            +
                else:
         | 
| 42 | 
            +
                    return None, None, None, None
         | 
| 43 | 
            +
             | 
| 44 | 
            +
                image_list = osp.join(datapath, 'rgb.txt')
         | 
| 45 | 
            +
                depth_list = osp.join(datapath, 'depth.txt')
         | 
| 46 | 
            +
             | 
| 47 | 
            +
                calib_path = osp.join(datapath, 'calibration.txt')
         | 
| 48 | 
            +
                intrinsic = None
         | 
| 49 | 
            +
                if osp.isfile(calib_path):
         | 
| 50 | 
            +
                    intrinsic = np.loadtxt(calib_path, delimiter=' ')
         | 
| 51 | 
            +
                    intrinsic = intrinsic.astype(np.float64)
         | 
| 52 | 
            +
             | 
| 53 | 
            +
                image_data = parse_list(image_list)
         | 
| 54 | 
            +
                depth_data = parse_list(depth_list)
         | 
| 55 | 
            +
                pose_data = parse_list(pose_list, skiprows=1)
         | 
| 56 | 
            +
                pose_vecs = pose_data[:,1:].astype(np.float64)
         | 
| 57 | 
            +
             | 
| 58 | 
            +
                tstamp_image = image_data[:,0].astype(np.float64)
         | 
| 59 | 
            +
                tstamp_depth = depth_data[:,0].astype(np.float64)
         | 
| 60 | 
            +
                tstamp_pose = pose_data[:,0].astype(np.float64)
         | 
| 61 | 
            +
                associations = associate_frames(tstamp_image, tstamp_depth, tstamp_pose)
         | 
| 62 | 
            +
             | 
| 63 | 
            +
                # print(len(tstamp_image))
         | 
| 64 | 
            +
                # print(len(associations))
         | 
| 65 | 
            +
             | 
| 66 | 
            +
                indicies = range(len(associations))[::5]
         | 
| 67 | 
            +
             | 
| 68 | 
            +
                # indicies = [ 0 ]
         | 
| 69 | 
            +
                # for i in range(1, len(associations)):
         | 
| 70 | 
            +
                #     t0 = tstamp_image[associations[indicies[-1]][0]]
         | 
| 71 | 
            +
                #     t1 = tstamp_image[associations[i][0]]
         | 
| 72 | 
            +
                #     if t1 - t0 > 1.0 / frame_rate:
         | 
| 73 | 
            +
                #         indicies += [ i ]
         | 
| 74 | 
            +
             | 
| 75 | 
            +
                images, poses, depths, intrinsics, tstamps = [], [], [], [], []
         | 
| 76 | 
            +
                for ix in indicies:
         | 
| 77 | 
            +
                    (i, j, k) = associations[ix]
         | 
| 78 | 
            +
                    images += [ osp.join(datapath, image_data[i,1]) ]
         | 
| 79 | 
            +
                    depths += [ osp.join(datapath, depth_data[j,1]) ]
         | 
| 80 | 
            +
                    poses += [ pose_vecs[k] ]
         | 
| 81 | 
            +
                    tstamps += [ tstamp_image[i] ]
         | 
| 82 | 
            +
                    
         | 
| 83 | 
            +
                    if intrinsic is not None:
         | 
| 84 | 
            +
                        intrinsics += [ intrinsic ]
         | 
| 85 | 
            +
             | 
| 86 | 
            +
                return images, depths, poses, intrinsics, tstamps
         | 
| 87 | 
            +
             | 
| 88 | 
            +
             | 
| 89 | 
            +
            def all_pairs_distance_matrix(poses, beta=2.5):
         | 
| 90 | 
            +
                """ compute distance matrix between all pairs of poses """
         | 
| 91 | 
            +
                poses = np.array(poses, dtype=np.float32)
         | 
| 92 | 
            +
                poses[:,:3] *= beta # scale to balence rot + trans
         | 
| 93 | 
            +
                poses = SE3(torch.from_numpy(poses))
         | 
| 94 | 
            +
             | 
| 95 | 
            +
                r = (poses[:,None].inv() * poses[None,:]).log()
         | 
| 96 | 
            +
                return r.norm(dim=-1).cpu().numpy()
         | 
| 97 | 
            +
             | 
| 98 | 
            +
            def pose_matrix_to_quaternion(pose):
         | 
| 99 | 
            +
                """ convert 4x4 pose matrix to (t, q) """
         | 
| 100 | 
            +
                q = Rotation.from_matrix(pose[:3, :3]).as_quat()
         | 
| 101 | 
            +
                return np.concatenate([pose[:3, 3], q], axis=0)
         | 
| 102 | 
            +
             | 
| 103 | 
            +
            def compute_distance_matrix_flow(poses, disps, intrinsics):
         | 
| 104 | 
            +
                """ compute flow magnitude between all pairs of frames """
         | 
| 105 | 
            +
                if not isinstance(poses, SE3):
         | 
| 106 | 
            +
                    poses = torch.from_numpy(poses).float().cuda()[None]
         | 
| 107 | 
            +
                    poses = SE3(poses).inv()
         | 
| 108 | 
            +
             | 
| 109 | 
            +
                    disps = torch.from_numpy(disps).float().cuda()[None]
         | 
| 110 | 
            +
                    intrinsics = torch.from_numpy(intrinsics).float().cuda()[None]
         | 
| 111 | 
            +
             | 
| 112 | 
            +
                N = poses.shape[1]
         | 
| 113 | 
            +
                
         | 
| 114 | 
            +
                ii, jj = torch.meshgrid(torch.arange(N), torch.arange(N))
         | 
| 115 | 
            +
                ii = ii.reshape(-1).cuda()
         | 
| 116 | 
            +
                jj = jj.reshape(-1).cuda()
         | 
| 117 | 
            +
             | 
| 118 | 
            +
                MAX_FLOW = 100.0
         | 
| 119 | 
            +
                matrix = np.zeros((N, N), dtype=np.float32)
         | 
| 120 | 
            +
             | 
| 121 | 
            +
                s = 2048
         | 
| 122 | 
            +
                for i in range(0, ii.shape[0], s):
         | 
| 123 | 
            +
                    flow1, val1 = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s])
         | 
| 124 | 
            +
                    flow2, val2 = pops.induced_flow(poses, disps, intrinsics, jj[i:i+s], ii[i:i+s])
         | 
| 125 | 
            +
                    
         | 
| 126 | 
            +
                    flow = torch.stack([flow1, flow2], dim=2)
         | 
| 127 | 
            +
                    val = torch.stack([val1, val2], dim=2)
         | 
| 128 | 
            +
                    
         | 
| 129 | 
            +
                    mag = flow.norm(dim=-1).clamp(max=MAX_FLOW)
         | 
| 130 | 
            +
                    mag = mag.view(mag.shape[1], -1)
         | 
| 131 | 
            +
                    val = val.view(val.shape[1], -1)
         | 
| 132 | 
            +
             | 
| 133 | 
            +
                    mag = (mag * val).mean(-1) / val.mean(-1)
         | 
| 134 | 
            +
                    mag[val.mean(-1) < 0.7] = np.inf
         | 
| 135 | 
            +
             | 
| 136 | 
            +
                    i1 = ii[i:i+s].cpu().numpy()
         | 
| 137 | 
            +
                    j1 = jj[i:i+s].cpu().numpy()
         | 
| 138 | 
            +
            matrix[i1, j1] = mag.cpu().numpy()

    return matrix


def compute_distance_matrix_flow2(poses, disps, intrinsics, beta=0.4):
    """ compute flow magnitude between all pairs of frames """
    # if not isinstance(poses, SE3):
    #     poses = torch.from_numpy(poses).float().cuda()[None]
    #     poses = SE3(poses).inv()

    #     disps = torch.from_numpy(disps).float().cuda()[None]
    #     intrinsics = torch.from_numpy(intrinsics).float().cuda()[None]

    N = poses.shape[1]

    ii, jj = torch.meshgrid(torch.arange(N), torch.arange(N), indexing='ij')
    ii = ii.reshape(-1)
    jj = jj.reshape(-1)

    MAX_FLOW = 128.0
    matrix = np.zeros((N, N), dtype=np.float32)

    s = 2048
    for i in range(0, ii.shape[0], s):
        # translation-only and full induced flow, in both directions (i -> j and j -> i)
        flow1a, val1a = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s], tonly=True)
        flow1b, val1b = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s])
        flow2a, val2a = pops.induced_flow(poses, disps, intrinsics, jj[i:i+s], ii[i:i+s], tonly=True)
        flow2b, val2b = pops.induced_flow(poses, disps, intrinsics, jj[i:i+s], ii[i:i+s])

        flow1 = flow1a + beta * flow1b
        val1 = val1a * val1b

        flow2 = flow2a + beta * flow2b
        val2 = val2a * val2b

        flow = torch.stack([flow1, flow2], dim=2)
        val = torch.stack([val1, val2], dim=2)

        mag = flow.norm(dim=-1).clamp(max=MAX_FLOW)
        mag = mag.view(mag.shape[1], -1)
        val = val.view(val.shape[1], -1)

        # masked mean flow magnitude; pairs with too few valid pixels are unusable
        mag = (mag * val).mean(-1) / val.mean(-1)
        mag[val.mean(-1) < 0.8] = np.inf

        i1 = ii[i:i+s].cpu().numpy()
        j1 = jj[i:i+s].cpu().numpy()
        matrix[i1, j1] = mag.cpu().numpy()

    return matrix
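For context, a hedged sketch of how a distance matrix like this is typically thresholded into a co-visibility graph; the 24-pixel cutoff and the batched 1xN cuda tensor shapes are assumptions (mirroring the commented-out conversion block above):

    # hypothetical driver: keep pairs whose mean induced flow stays under ~24 px;
    # pairs that failed the 0.8 validity check are np.inf and drop out automatically
    d = compute_distance_matrix_flow2(poses, disps, intrinsics)
    graph = {i: np.where(d[i] < 24.0)[0] for i in range(d.shape[0])}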
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/tartan.py
ADDED
@@ -0,0 +1,110 @@
import numpy as np
import torch
import glob
import cv2
import os
import os.path as osp

from ..lietorch import SE3
from .base import RGBDDataset

# cur_path = osp.dirname(osp.abspath(__file__))
# test_split = osp.join(cur_path, 'tartan_test.txt')
# test_split = open(test_split).read().split()


test_split = [
    "abandonedfactory/abandonedfactory/Easy/P011",
    "abandonedfactory/abandonedfactory/Hard/P011",
    "abandonedfactory_night/abandonedfactory_night/Easy/P013",
    "abandonedfactory_night/abandonedfactory_night/Hard/P014",
    "amusement/amusement/Easy/P008",
    "amusement/amusement/Hard/P007",
    "carwelding/carwelding/Easy/P007",
    "endofworld/endofworld/Easy/P009",
    "gascola/gascola/Easy/P008",
    "gascola/gascola/Hard/P009",
    "hospital/hospital/Easy/P036",
    "hospital/hospital/Hard/P049",
    "japanesealley/japanesealley/Easy/P007",
    "japanesealley/japanesealley/Hard/P005",
    "neighborhood/neighborhood/Easy/P021",
    "neighborhood/neighborhood/Hard/P017",
    "ocean/ocean/Easy/P013",
    "ocean/ocean/Hard/P009",
    "office2/office2/Easy/P011",
    "office2/office2/Hard/P010",
    "office/office/Hard/P007",
    "oldtown/oldtown/Easy/P007",
    "oldtown/oldtown/Hard/P008",
    "seasidetown/seasidetown/Easy/P009",
    "seasonsforest/seasonsforest/Easy/P011",
    "seasonsforest/seasonsforest/Hard/P006",
    "seasonsforest_winter/seasonsforest_winter/Easy/P009",
    "seasonsforest_winter/seasonsforest_winter/Hard/P018",
    "soulcity/soulcity/Easy/P012",
    "soulcity/soulcity/Hard/P009",
    "westerndesert/westerndesert/Easy/P013",
    "westerndesert/westerndesert/Hard/P007",
]


class TartanAir(RGBDDataset):

    # scale depths to balance rot & trans
    DEPTH_SCALE = 5.0

    def __init__(self, mode='training', **kwargs):
        self.mode = mode
        self.n_frames = 2
        super(TartanAir, self).__init__(name='TartanAir', **kwargs)

    @staticmethod
    def is_test_scene(scene):
        # print(scene, any(x in scene for x in test_split))
        return any(x in scene for x in test_split)

    def _build_dataset(self):
        from tqdm import tqdm
        print("Building TartanAir dataset")

        scene_info = {}
        scenes = glob.glob(osp.join(self.root, '*/*/*/*'))
        for scene in tqdm(sorted(scenes)):
            images = sorted(glob.glob(osp.join(scene, 'image_left/*.png')))
            depths = sorted(glob.glob(osp.join(scene, 'depth_left/*.npy')))

            if len(images) != len(depths):
                continue

            # poses are stored as (x, y, z, qx, qy, qz, qw); reorder the axes
            # and scale translation to stay consistent with DEPTH_SCALE
            poses = np.loadtxt(osp.join(scene, 'pose_left.txt'), delimiter=' ')
            poses = poses[:, [1, 2, 0, 4, 5, 3, 6]]
            poses[:,:3] /= TartanAir.DEPTH_SCALE
            intrinsics = [TartanAir.calib_read()] * len(images)

            # graph of co-visible frames based on flow
            graph = self.build_frame_graph(poses, depths, intrinsics)

            scene = '/'.join(scene.split('/'))
            scene_info[scene] = {'images': images, 'depths': depths,
                'poses': poses, 'intrinsics': intrinsics, 'graph': graph}

        return scene_info

    @staticmethod
    def calib_read():
        return np.array([320.0, 320.0, 320.0, 240.0])

    @staticmethod
    def image_read(image_file):
        return cv2.imread(image_file)

    @staticmethod
    def depth_read(depth_file):
        # replace invalid (NaN / inf) depths with a default value; note that
        # `depth == np.nan` is always False, so np.isnan/np.isinf are required
        depth = np.load(depth_file) / TartanAir.DEPTH_SCALE
        depth[np.isnan(depth)] = 1.0
        depth[np.isinf(depth)] = 1.0
        return depth
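A minimal sketch exercising the static helpers defined above; the full constructor path goes through `RGBDDataset` in base.py, whose signature is not reproduced here:

    # TartanAir uses fixed pinhole intrinsics: fx = fy = cx = 320, cy = 240
    fx, fy, cx, cy = TartanAir.calib_read()
    assert TartanAir.is_test_scene("ocean/ocean/Easy/P013")       # held-out sequence
    assert not TartanAir.is_test_scene("ocean/ocean/Easy/P001")   # training sequence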
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/dpvo.py
ADDED
@@ -0,0 +1,402 @@
import torch
import numpy as np
import torch.nn.functional as F

from . import fastba
from . import altcorr
from . import lietorch
from .lietorch import SE3

from .net import VONet
from .utils import *
from . import projective_ops as pops

autocast = torch.cuda.amp.autocast
Id = SE3.Identity(1, device="cuda")


class DPVO:
    def __init__(self, cfg, network, ht=480, wd=640, viz=False):
        self.cfg = cfg
        self.load_weights(network)
        self.is_initialized = False
        self.enable_timing = False

        self.n = 0      # number of frames
        self.m = 0      # number of patches
        self.M = self.cfg.PATCHES_PER_FRAME
        self.N = self.cfg.BUFFER_SIZE

        self.ht = ht    # image height
        self.wd = wd    # image width

        DIM = self.DIM
        RES = self.RES

        ### state attributes ###
        self.tlist = []
        self.counter = 0

        # dummy image for visualization
        self.image_ = torch.zeros(self.ht, self.wd, 3, dtype=torch.uint8, device="cpu")

        self.tstamps_ = torch.zeros(self.N, dtype=torch.long, device="cuda")
        self.poses_ = torch.zeros(self.N, 7, dtype=torch.float, device="cuda")
        self.patches_ = torch.zeros(self.N, self.M, 3, self.P, self.P, dtype=torch.float, device="cuda")
        self.intrinsics_ = torch.zeros(self.N, 4, dtype=torch.float, device="cuda")

        self.points_ = torch.zeros(self.N * self.M, 3, dtype=torch.float, device="cuda")
        self.colors_ = torch.zeros(self.N, self.M, 3, dtype=torch.uint8, device="cuda")

        self.index_ = torch.zeros(self.N, self.M, dtype=torch.long, device="cuda")
        self.index_map_ = torch.zeros(self.N, dtype=torch.long, device="cuda")

        ### network attributes ###
        self.mem = 32

        if self.cfg.MIXED_PRECISION:
            self.kwargs = kwargs = {"device": "cuda", "dtype": torch.half}
        else:
            self.kwargs = kwargs = {"device": "cuda", "dtype": torch.float}

        self.imap_ = torch.zeros(self.mem, self.M, DIM, **kwargs)
        self.gmap_ = torch.zeros(self.mem, self.M, 128, self.P, self.P, **kwargs)

        ht = ht // RES
        wd = wd // RES

        self.fmap1_ = torch.zeros(1, self.mem, 128, ht // 1, wd // 1, **kwargs)
        self.fmap2_ = torch.zeros(1, self.mem, 128, ht // 4, wd // 4, **kwargs)

        # feature pyramid
        self.pyramid = (self.fmap1_, self.fmap2_)

        self.net = torch.zeros(1, 0, DIM, **kwargs)
        self.ii = torch.as_tensor([], dtype=torch.long, device="cuda")
        self.jj = torch.as_tensor([], dtype=torch.long, device="cuda")
        self.kk = torch.as_tensor([], dtype=torch.long, device="cuda")

        # initialize poses to the identity transformation (unit quaternion)
        self.poses_[:,6] = 1.0

        # store relative poses for removed frames
        self.delta = {}

        self.viewer = None
        if viz:
            self.start_viewer()

    def load_weights(self, network):
        # load network from checkpoint file
        if isinstance(network, str):
            from collections import OrderedDict
            state_dict = torch.load(network)
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                if "update.lmbda" not in k:
                    new_state_dict[k.replace('module.', '')] = v

            self.network = VONet()
            self.network.load_state_dict(new_state_dict)

        else:
            self.network = network

        # steal network attributes
        self.DIM = self.network.DIM
        self.RES = self.network.RES
        self.P = self.network.P

        self.network.cuda()
        self.network.eval()

        # if self.cfg.MIXED_PRECISION:
        #     self.network.half()

    def start_viewer(self):
        from dpviewer import Viewer

        intrinsics_ = torch.zeros(1, 4, dtype=torch.float32, device="cuda")

        self.viewer = Viewer(
            self.image_,
            self.poses_,
            self.points_,
            self.colors_,
            intrinsics_)

    @property
    def poses(self):
        return self.poses_.view(1, self.N, 7)

    @property
    def patches(self):
        return self.patches_.view(1, self.N*self.M, 3, 3, 3)

    @property
    def intrinsics(self):
        return self.intrinsics_.view(1, self.N, 4)

    @property
    def ix(self):
        return self.index_.view(-1)

    @property
    def imap(self):
        return self.imap_.view(1, self.mem * self.M, self.DIM)

    @property
    def gmap(self):
        return self.gmap_.view(1, self.mem * self.M, 128, 3, 3)

    def get_pose(self, t):
        if t in self.traj:
            return SE3(self.traj[t])

        t0, dP = self.delta[t]
        return dP * self.get_pose(t0)

    def terminate(self):
        """ interpolate missing poses """
        self.traj = {}
        for i in range(self.n):
            self.traj[self.tstamps_[i].item()] = self.poses_[i]

        poses = [self.get_pose(t) for t in range(self.counter)]
        poses = lietorch.stack(poses, dim=0)
        poses = poses.inv().data.cpu().numpy()
        tstamps = np.array(self.tlist, dtype=np.float64)

        if self.viewer is not None:
            self.viewer.join()

        return poses, tstamps

    def corr(self, coords, indices=None):
        """ local correlation volume """
        ii, jj = indices if indices is not None else (self.kk, self.jj)
        ii1 = ii % (self.M * self.mem)
        jj1 = jj % (self.mem)
        corr1 = altcorr.corr(self.gmap, self.pyramid[0], coords / 1, ii1, jj1, 3)
        corr2 = altcorr.corr(self.gmap, self.pyramid[1], coords / 4, ii1, jj1, 3)
        return torch.stack([corr1, corr2], -1).view(1, len(ii), -1)

    def reproject(self, indices=None):
        """ reproject patch k from i -> j """
        (ii, jj, kk) = indices if indices is not None else (self.ii, self.jj, self.kk)
        coords = pops.transform(SE3(self.poses), self.patches, self.intrinsics, ii, jj, kk)
        return coords.permute(0, 1, 4, 2, 3).contiguous()

    def append_factors(self, ii, jj):
        self.jj = torch.cat([self.jj, jj])
        self.kk = torch.cat([self.kk, ii])
        self.ii = torch.cat([self.ii, self.ix[ii]])

        net = torch.zeros(1, len(ii), self.DIM, **self.kwargs)
        self.net = torch.cat([self.net, net], dim=1)

    def remove_factors(self, m):
        self.ii = self.ii[~m]
        self.jj = self.jj[~m]
        self.kk = self.kk[~m]
        self.net = self.net[:,~m]

    def motion_probe(self):
        """ kinda hacky way to ensure enough motion for initialization """
        kk = torch.arange(self.m-self.M, self.m, device="cuda")
        jj = self.n * torch.ones_like(kk)
        ii = self.ix[kk]

        net = torch.zeros(1, len(ii), self.DIM, **self.kwargs)
        coords = self.reproject(indices=(ii, jj, kk))

        with autocast(enabled=self.cfg.MIXED_PRECISION):
            corr = self.corr(coords, indices=(kk, jj))
            ctx = self.imap[:,kk % (self.M * self.mem)]
            net, (delta, weight, _) = \
                self.network.update(net, ctx, corr, None, ii, jj, kk)

        return torch.quantile(delta.norm(dim=-1).float(), 0.5)

    def motionmag(self, i, j):
        k = (self.ii == i) & (self.jj == j)
        ii = self.ii[k]
        jj = self.jj[k]
        kk = self.kk[k]

        flow = pops.flow_mag(SE3(self.poses), self.patches, self.intrinsics, ii, jj, kk, beta=0.5)
        return flow.mean().item()

    def keyframe(self):

        i = self.n - self.cfg.KEYFRAME_INDEX - 1
        j = self.n - self.cfg.KEYFRAME_INDEX + 1
        m = self.motionmag(i, j) + self.motionmag(j, i)

        if m / 2 < self.cfg.KEYFRAME_THRESH:
            k = self.n - self.cfg.KEYFRAME_INDEX
            t0 = self.tstamps_[k-1].item()
            t1 = self.tstamps_[k].item()

            dP = SE3(self.poses_[k]) * SE3(self.poses_[k-1]).inv()
            self.delta[t1] = (t0, dP)

            to_remove = (self.ii == k) | (self.jj == k)
            self.remove_factors(to_remove)

            self.kk[self.ii > k] -= self.M
            self.ii[self.ii > k] -= 1
            self.jj[self.jj > k] -= 1

            for i in range(k, self.n-1):
                self.tstamps_[i] = self.tstamps_[i+1]
                self.colors_[i] = self.colors_[i+1]
                self.poses_[i] = self.poses_[i+1]
                self.patches_[i] = self.patches_[i+1]
                self.intrinsics_[i] = self.intrinsics_[i+1]

                self.imap_[i%self.mem] = self.imap_[(i+1) % self.mem]
                self.gmap_[i%self.mem] = self.gmap_[(i+1) % self.mem]
                self.fmap1_[0,i%self.mem] = self.fmap1_[0,(i+1)%self.mem]
                self.fmap2_[0,i%self.mem] = self.fmap2_[0,(i+1)%self.mem]

            self.n -= 1
            self.m -= self.M

        to_remove = self.ix[self.kk] < self.n - self.cfg.REMOVAL_WINDOW
        self.remove_factors(to_remove)

    def update(self):
        with Timer("other", enabled=self.enable_timing):
            coords = self.reproject()

            with autocast(enabled=True):
                corr = self.corr(coords)
                ctx = self.imap[:,self.kk % (self.M * self.mem)]
                self.net, (delta, weight, _) = \
                    self.network.update(self.net, ctx, corr, None, self.ii, self.jj, self.kk)

            lmbda = torch.as_tensor([1e-4], device="cuda")
            weight = weight.float()
            target = coords[...,self.P//2,self.P//2] + delta.float()

        with Timer("BA", enabled=self.enable_timing):
            t0 = self.n - self.cfg.OPTIMIZATION_WINDOW if self.is_initialized else 1
            t0 = max(t0, 1)

            try:
                fastba.BA(self.poses, self.patches, self.intrinsics,
                    target, weight, lmbda, self.ii, self.jj, self.kk, t0, self.n, 2)
            except Exception:
                print("Warning BA failed...")

            points = pops.point_cloud(SE3(self.poses), self.patches[:, :self.m], self.intrinsics, self.ix[:self.m])
            points = (points[...,1,1,:3] / points[...,1,1,3:]).reshape(-1, 3)
            self.points_[:len(points)] = points[:]

    def __edges_all(self):
        return flatmeshgrid(
            torch.arange(0, self.m, device="cuda"),
            torch.arange(0, self.n, device="cuda"), indexing='ij')

    def __edges_forw(self):
        r = self.cfg.PATCH_LIFETIME
        t0 = self.M * max((self.n - r), 0)
        t1 = self.M * max((self.n - 1), 0)
        return flatmeshgrid(
            torch.arange(t0, t1, device="cuda"),
            torch.arange(self.n-1, self.n, device="cuda"), indexing='ij')

    def __edges_back(self):
        r = self.cfg.PATCH_LIFETIME
        t0 = self.M * max((self.n - 1), 0)
        t1 = self.M * max((self.n - 0), 0)
        return flatmeshgrid(torch.arange(t0, t1, device="cuda"),
            torch.arange(max(self.n-r, 0), self.n, device="cuda"), indexing='ij')

    def __call__(self, tstamp, image, intrinsics):
        """ track new frame """

        if (self.n+1) >= self.N:
            raise Exception(f'The buffer size is too small. You can increase it using "--buffer {self.N*2}"')

        if self.viewer is not None:
            self.viewer.update_image(image)

        image = 2 * (image[None,None] / 255.0) - 0.5

        with autocast(enabled=self.cfg.MIXED_PRECISION):
            fmap, gmap, imap, patches, _, clr = \
                self.network.patchify(image,
                    patches_per_image=self.cfg.PATCHES_PER_FRAME,
                    gradient_bias=self.cfg.GRADIENT_BIAS,
                    return_color=True)

        ### update state attributes ###
        self.tlist.append(tstamp)
        self.tstamps_[self.n] = self.counter
        self.intrinsics_[self.n] = intrinsics / self.RES

        # color info for visualization
        clr = (clr[0,:,[2,1,0]] + 0.5) * (255.0 / 2)
        self.colors_[self.n] = clr.to(torch.uint8)

        self.index_[self.n + 1] = self.n + 1
        self.index_map_[self.n + 1] = self.m + self.M

        if self.n > 1:
            if self.cfg.MOTION_MODEL == 'DAMPED_LINEAR':
                P1 = SE3(self.poses_[self.n-1])
                P2 = SE3(self.poses_[self.n-2])

                xi = self.cfg.MOTION_DAMPING * (P1 * P2.inv()).log()
                tvec_qvec = (SE3.exp(xi) * P1).data
                self.poses_[self.n] = tvec_qvec
            else:
                tvec_qvec = self.poses[self.n-1]
                self.poses_[self.n] = tvec_qvec

        # TODO better depth initialization
        patches[:,:,2] = torch.rand_like(patches[:,:,2,0,0,None,None])
        if self.is_initialized:
            s = torch.median(self.patches_[self.n-3:self.n,:,2])
            patches[:,:,2] = s

        self.patches_[self.n] = patches

        ### update network attributes ###
        self.imap_[self.n % self.mem] = imap.squeeze()
        self.gmap_[self.n % self.mem] = gmap.squeeze()
        self.fmap1_[:, self.n % self.mem] = F.avg_pool2d(fmap[0], 1, 1)
        self.fmap2_[:, self.n % self.mem] = F.avg_pool2d(fmap[0], 4, 4)

        self.counter += 1
        if self.n > 0 and not self.is_initialized:
            if self.motion_probe() < 2.0:
                self.delta[self.counter - 1] = (self.counter - 2, Id[0])
                return

        self.n += 1
        self.m += self.M

        # relative pose
        self.append_factors(*self.__edges_forw())
        self.append_factors(*self.__edges_back())

        if self.n == 8 and not self.is_initialized:
            self.is_initialized = True

            for itr in range(12):
                self.update()

        elif self.is_initialized:
            self.update()
            self.keyframe()
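For orientation, a hedged driver loop for the class above: `cfg` must expose the fields referenced in the code (PATCHES_PER_FRAME, BUFFER_SIZE, MIXED_PRECISION, MOTION_MODEL, ...); the checkpoint path and the `stream` iterable are illustrative placeholders:

    # hypothetical usage sketch; `image` is assumed to be a 3xHxW uint8 cuda tensor
    slam = DPVO(cfg, "dpvo.pth", ht=480, wd=640, viz=False)
    for t, (image, intrinsics) in enumerate(stream):
        slam(t, image, intrinsics)            # track one frame
    poses, tstamps = slam.terminate()         # per-frame 7-vector poses + timestamps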
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/extractor.py
ADDED
@@ -0,0 +1,264 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class ResidualBlock(nn.Module):
    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()

        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8

        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)

        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes)
            self.norm2 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.BatchNorm2d(planes)

        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes)
            self.norm2 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm3 = nn.InstanceNorm2d(planes)

        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            if not stride == 1:
                self.norm3 = nn.Sequential()

        if stride == 1:
            self.downsample = None

        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)

    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))

        if self.downsample is not None:
            x = self.downsample(x)

        return self.relu(x+y)


class BottleneckBlock(nn.Module):
    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()

        self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride)
        self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8

        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4)
            self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not stride == 1:
                self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)

        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes//4)
            self.norm2 = nn.BatchNorm2d(planes//4)
            self.norm3 = nn.BatchNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.BatchNorm2d(planes)

        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes//4)
            self.norm2 = nn.InstanceNorm2d(planes//4)
            self.norm3 = nn.InstanceNorm2d(planes)
            if not stride == 1:
                self.norm4 = nn.InstanceNorm2d(planes)

        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            self.norm3 = nn.Sequential()
            if not stride == 1:
                self.norm4 = nn.Sequential()

        if stride == 1:
            self.downsample = None

        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)

    def forward(self, x):
        y = x
        y = self.relu(self.norm1(self.conv1(y)))
        y = self.relu(self.norm2(self.conv2(y)))
        y = self.relu(self.norm3(self.conv3(y)))

        if self.downsample is not None:
            x = self.downsample(x)

        return self.relu(x+y)


DIM = 32

class BasicEncoder(nn.Module):
    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, multidim=False):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn
        self.multidim = multidim

        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=DIM)

        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(DIM)

        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(DIM)

        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()

        self.conv1 = nn.Conv2d(3, DIM, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)

        self.in_planes = DIM
        self.layer1 = self._make_layer(DIM,  stride=1)
        self.layer2 = self._make_layer(2*DIM, stride=2)
        self.layer3 = self._make_layer(4*DIM, stride=2)

        # output convolution
        self.conv2 = nn.Conv2d(4*DIM, output_dim, kernel_size=1)

        if self.multidim:
            self.layer4 = self._make_layer(256, stride=2)
            self.layer5 = self._make_layer(512, stride=2)

            self.in_planes = 256
            self.layer6 = self._make_layer(256, stride=1)

            self.in_planes = 128
            self.layer7 = self._make_layer(128, stride=1)

            self.up1 = nn.Conv2d(512, 256, 1)
            self.up2 = nn.Conv2d(256, 128, 1)
            self.conv3 = nn.Conv2d(128, output_dim, kernel_size=1)

        if dropout > 0:
            self.dropout = nn.Dropout2d(p=dropout)
        else:
            self.dropout = None

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
            +
                    layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
         | 
| 177 | 
            +
                    layers = (layer1, layer2)
         | 
| 178 | 
            +
                    
         | 
| 179 | 
            +
                    self.in_planes = dim
         | 
| 180 | 
            +
                    return nn.Sequential(*layers)
         | 
| 181 | 
            +
             | 
| 182 | 
            +
                def forward(self, x):
         | 
| 183 | 
            +
                    b, n, c1, h1, w1 = x.shape
         | 
| 184 | 
            +
                    x = x.view(b*n, c1, h1, w1)
         | 
| 185 | 
            +
             | 
| 186 | 
            +
                    x = self.conv1(x)
         | 
| 187 | 
            +
                    x = self.norm1(x)
         | 
| 188 | 
            +
                    x = self.relu1(x)
         | 
| 189 | 
            +
             | 
| 190 | 
            +
                    x = self.layer1(x)
         | 
| 191 | 
            +
                    x = self.layer2(x)
         | 
| 192 | 
            +
                    x = self.layer3(x)
         | 
| 193 | 
            +
             | 
| 194 | 
            +
                    x = self.conv2(x)
         | 
| 195 | 
            +
             | 
| 196 | 
            +
                    _, c2, h2, w2 = x.shape
         | 
| 197 | 
            +
                    return x.view(b, n, c2, h2, w2)
         | 
| 198 | 
            +
             | 
| 199 | 
            +
             | 
| 200 | 
            +
            class BasicEncoder4(nn.Module):
         | 
| 201 | 
            +
                def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, multidim=False):
         | 
| 202 | 
            +
                    super(BasicEncoder4, self).__init__()
         | 
| 203 | 
            +
                    self.norm_fn = norm_fn
         | 
| 204 | 
            +
                    self.multidim = multidim
         | 
| 205 | 
            +
             | 
| 206 | 
            +
                    if self.norm_fn == 'group':
         | 
| 207 | 
            +
                        self.norm1 = nn.GroupNorm(num_groups=8, num_channels=DIM)
         | 
| 208 | 
            +
                        
         | 
| 209 | 
            +
                    elif self.norm_fn == 'batch':
         | 
| 210 | 
            +
                        self.norm1 = nn.BatchNorm2d(DIM)
         | 
| 211 | 
            +
             | 
| 212 | 
            +
                    elif self.norm_fn == 'instance':
         | 
| 213 | 
            +
                        self.norm1 = nn.InstanceNorm2d(DIM)
         | 
| 214 | 
            +
             | 
| 215 | 
            +
                    elif self.norm_fn == 'none':
         | 
| 216 | 
            +
                        self.norm1 = nn.Sequential()
         | 
| 217 | 
            +
             | 
| 218 | 
            +
                    self.conv1 = nn.Conv2d(3, DIM, kernel_size=7, stride=2, padding=3)
         | 
| 219 | 
            +
                    self.relu1 = nn.ReLU(inplace=True)
         | 
| 220 | 
            +
             | 
| 221 | 
            +
                    self.in_planes = DIM
         | 
| 222 | 
            +
                    self.layer1 = self._make_layer(DIM,  stride=1)
         | 
| 223 | 
            +
                    self.layer2 = self._make_layer(2*DIM, stride=2)
         | 
| 224 | 
            +
             | 
| 225 | 
            +
                    # output convolution
         | 
| 226 | 
            +
                    self.conv2 = nn.Conv2d(2*DIM, output_dim, kernel_size=1)
         | 
| 227 | 
            +
             | 
| 228 | 
            +
                    if dropout > 0:
         | 
| 229 | 
            +
                        self.dropout = nn.Dropout2d(p=dropout)
         | 
| 230 | 
            +
                    else:
         | 
| 231 | 
            +
                        self.dropout = None
         | 
| 232 | 
            +
             | 
| 233 | 
            +
                    for m in self.modules():
         | 
| 234 | 
            +
                        if isinstance(m, nn.Conv2d):
         | 
| 235 | 
            +
                            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
         | 
| 236 | 
            +
                        elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
         | 
| 237 | 
            +
                            if m.weight is not None:
         | 
| 238 | 
            +
                                nn.init.constant_(m.weight, 1)
         | 
| 239 | 
            +
                            if m.bias is not None:
         | 
| 240 | 
            +
                                nn.init.constant_(m.bias, 0)
         | 
| 241 | 
            +
             | 
| 242 | 
            +
                def _make_layer(self, dim, stride=1):
         | 
| 243 | 
            +
                    layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride)
         | 
| 244 | 
            +
                    layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1)
         | 
| 245 | 
            +
                    layers = (layer1, layer2)
         | 
| 246 | 
            +
                    
         | 
| 247 | 
            +
                    self.in_planes = dim
         | 
| 248 | 
            +
                    return nn.Sequential(*layers)
         | 
| 249 | 
            +
             | 
| 250 | 
            +
                def forward(self, x):
         | 
| 251 | 
            +
                    b, n, c1, h1, w1 = x.shape
         | 
| 252 | 
            +
                    x = x.view(b*n, c1, h1, w1)
         | 
| 253 | 
            +
             | 
| 254 | 
            +
                    x = self.conv1(x)
         | 
| 255 | 
            +
                    x = self.norm1(x)
         | 
| 256 | 
            +
                    x = self.relu1(x)
         | 
| 257 | 
            +
             | 
| 258 | 
            +
                    x = self.layer1(x)
         | 
| 259 | 
            +
                    x = self.layer2(x)
         | 
| 260 | 
            +
             | 
| 261 | 
            +
                    x = self.conv2(x)
         | 
| 262 | 
            +
             | 
| 263 | 
            +
                    _, c2, h2, w2 = x.shape
         | 
| 264 | 
            +
                    return x.view(b, n, c2, h2, w2)
         | 
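
For orientation, a quick smoke test of the two encoders above (a sketch; the output_dim values and input size are illustrative assumptions, not fixed by this file). BasicEncoder produces features at 1/8 resolution, BasicEncoder4 at 1/4:

import torch
fnet = BasicEncoder(output_dim=128, norm_fn='instance')   # 1/8-resolution features
inet = BasicEncoder4(output_dim=384, norm_fn='none')      # 1/4-resolution features
x = torch.randn(1, 2, 3, 480, 640)                        # (batch, frames, rgb, H, W)
print(fnet(x).shape)   # torch.Size([1, 2, 128, 60, 80])
print(inet(x).shape)   # torch.Size([1, 2, 384, 120, 160])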
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/__init__.py
ADDED
@@ -0,0 +1 @@
+from .ba import BA, neighbors, reproject
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/ba.py
ADDED
@@ -0,0 +1,8 @@
+import torch
+import cuda_ba
+
+neighbors = cuda_ba.neighbors
+reproject = cuda_ba.reproject
+
+def BA(poses, patches, intrinsics, target, weight, lmbda, ii, jj, kk, t0, t1, iterations=2):
+    return cuda_ba.forward(poses.data, patches, intrinsics, target, weight, lmbda, ii, jj, kk, t0, t1, iterations)
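
The module above is only a thin wrapper over the compiled cuda_ba extension, so it imports only after the package's CUDA extensions are built; a minimal sketch of guarding the import:

# fastba resolves to the cuda_ba extension compiled by DPVO's setup step
try:
    from dpvo.fastba import BA, neighbors, reproject
except ImportError as err:
    print("cuda_ba extension not built yet:", err)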
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/__init__.py
ADDED
@@ -0,0 +1,2 @@
+__all__ = ['groups']
+from .groups import LieGroupParameter, SO3, RxSO3, SE3, Sim3, cat, stack
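
The re-exported classes are lietorch's differentiable Lie groups. A hedged usage sketch (exp/log/inv and * composition follow the upstream lietorch API; treat the exact calls as assumptions):

import torch
from dpvo.lietorch import SE3
phi = torch.randn(4, 6, dtype=torch.double)  # batch of se(3) tangent vectors
T = SE3.exp(phi)                             # exponential map onto SE(3)
residual = (T * T.inv()).log()               # compose with inverse, map back
print(residual.abs().max())                  # ~0 up to numerical precision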
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/broadcasting.py
ADDED
@@ -0,0 +1,31 @@
+import torch
+import numpy as np
+
+def check_broadcastable(x, y):
+    assert len(x.shape) == len(y.shape)
+    for (n, m) in zip(x.shape[:-1], y.shape[:-1]):
+        assert n==m or n==1 or m==1
+
+def broadcast_inputs(x, y):
+    """ Automatic broadcasting of missing dimensions """
+    if y is None:
+        xs, xd = x.shape[:-1], x.shape[-1]
+        return (x.view(-1, xd).contiguous(), ), x.shape[:-1]
+
+    check_broadcastable(x, y)
+
+    xs, xd = x.shape[:-1], x.shape[-1]
+    ys, yd = y.shape[:-1], y.shape[-1]
+    out_shape = [max(n,m) for (n,m) in zip(xs,ys)]
+
+    if x.shape[:-1] == y.shape[:-1]:
+        x1 = x.view(-1, xd)
+        y1 = y.view(-1, yd)
+
+    else:
+        x_expand = [m if n==1 else 1 for (n,m) in zip(xs, ys)]
+        y_expand = [n if m==1 else 1 for (n,m) in zip(xs, ys)]
+        x1 = x.repeat(x_expand + [1]).reshape(-1, xd).contiguous()
+        y1 = y.repeat(y_expand + [1]).reshape(-1, yd).contiguous()
+
+    return (x1, y1), tuple(out_shape)
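
A short example of what broadcast_inputs does with mismatched batch shapes (traced against the logic above):

import torch
from dpvo.lietorch.broadcasting import broadcast_inputs
x = torch.randn(1, 5, 7)                      # 5 elements along dim 1
y = torch.randn(3, 1, 7)                      # 3 elements along dim 0
(x1, y1), out_shape = broadcast_inputs(x, y)
print(x1.shape, y1.shape, out_shape)          # torch.Size([15, 7]) torch.Size([15, 7]) (3, 5)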
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/gradcheck.py
ADDED
@@ -0,0 +1,592 @@
+import torch
+
+TORCH_MAJOR = int(torch.__version__.split('.')[0])
+TORCH_MINOR = int(torch.__version__.split('.')[1])
+
+from torch.types import _TensorOrTensors
+if TORCH_MAJOR == 1 and TORCH_MINOR < 8:
+    from torch._six import container_abcs, istuple
+else:
+    import collections.abc as container_abcs
+
+import torch.testing
+from torch.overrides import is_tensor_like
+from itertools import product
+import warnings
+from typing import Callable, Union, Optional, Iterable, List
+
+def zero_gradients(x):
+    if isinstance(x, torch.Tensor):
+        if x.grad is not None:
+            x.grad.detach_()
+            x.grad.zero_()
+    elif isinstance(x, container_abcs.Iterable):
+        for elem in x:
+            zero_gradients(elem)
+
+
+def make_jacobian(input, num_out):
+    if is_tensor_like(input):
+        if not input.is_floating_point() and not input.is_complex():
+            return None
+        if not input.requires_grad:
+            return None
+        return input.new_zeros((input.nelement(), num_out), dtype=input.dtype, layout=torch.strided)
+    elif isinstance(input, container_abcs.Iterable) and not isinstance(input, str):
+        jacobians = list(filter(
+            lambda x: x is not None, (make_jacobian(elem, num_out) for elem in input)))
+        if not jacobians:
+            return None
+        return type(input)(jacobians)  # type: ignore
+    else:
+        return None
+
+
+def iter_tensors(x: Union[torch.Tensor, Iterable[torch.Tensor]], only_requiring_grad: bool = False) -> Iterable[torch.Tensor]:
+    if is_tensor_like(x):
+        # mypy doesn't narrow type of `x` to torch.Tensor
+        if x.requires_grad or not only_requiring_grad:  # type: ignore
+            yield x  # type: ignore
+    elif isinstance(x, container_abcs.Iterable) and not isinstance(x, str):
+        for elem in x:
+            for result in iter_tensors(elem, only_requiring_grad):
+                yield result
+
+def get_numerical_jacobian(fn, input, target=None, eps=1e-3, grad_out=1.0):
+    """
+    input: input to `fn`
+    target: the Tensors wrt whom Jacobians are calculated (default=`input`)
+    grad_out: grad output value used to calculate gradients.
+
+    Note that `target` may not even be part of `input` to `fn`, so please be
+    **very careful** in this to not clone `target`.
+    """
+    if target is None:
+        target = input
+    output_size = fn(input).numel()
+    jacobian = make_jacobian(target, output_size)
+
+    # It's much easier to iterate over flattened lists of tensors.
+    # These are reference to the same objects in jacobian, so any changes
+    # will be reflected in it as well.
+    x_tensors = iter_tensors(target, True)
+    j_tensors = iter_tensors(jacobian)
+
+    def update_jacobians(x, idx, d, d_idx, is_mkldnn=False):
+
+        # compute_jacobian only works for pure real
+        # or pure imaginary delta
+        def compute_gradient(delta):
+            # we currently assume that the norm of delta equals eps
+            assert(delta == eps or delta == (eps * 1j))
+
+            def fn_out():
+                if not is_mkldnn:
+                    # x is a view into input and so this works
+                    return fn(input).clone()
+                else:
+                    # convert the dense tensor back to have mkldnn layout
+                    return fn([x.to_mkldnn()])
+
+            orig = x[idx].item()
+            x[idx] = orig - delta
+            outa = fn_out()
+            x[idx] = orig + delta
+            outb = fn_out()
+            x[idx] = orig
+            r = (outb - outa) / (2 * eps)
+            return r.detach().reshape(-1)
+
+        # for details on the algorithm used here, refer:
+        # Section 3.5.3 https://arxiv.org/pdf/1701.00392.pdf
+        # s = fn(z) where z = x for real valued input
+        # and z = x + yj for complex valued input
+        ds_dx = compute_gradient(eps)
+        if x.is_complex():  # C -> C, C -> R
+            ds_dy = compute_gradient(eps * 1j)
+            # conjugate wirtinger derivative
+            conj_w_d = 0.5 * (ds_dx + ds_dy * 1j)
+            # wirtinger derivative
+            w_d = 0.5 * (ds_dx - ds_dy * 1j)
+            d[d_idx] = grad_out.conjugate() * conj_w_d + grad_out * w_d.conj()
+        elif ds_dx.is_complex():  # R -> C
+            # w_d = conj_w_d = 0.5 * ds_dx
+            # dL_dz_conj = 0.5 * [grad_out.conj() * ds_dx + grad_out * ds_dx.conj()]
+            #            = 0.5 * [grad_out.conj() * ds_dx + (grad_out.conj() * ds_dx).conj()]
+            #            = 0.5 * 2 * real(grad_out.conj() * ds_dx)
+            #            = real(grad_out.conj() * ds_dx)
+            d[d_idx] = torch.real(grad_out.conjugate() * ds_dx)
+        else:   # R -> R
+            d[d_idx] = ds_dx * grad_out
+
+    # TODO: compare structure
+    for x_tensor, d_tensor in zip(x_tensors, j_tensors):
+        if x_tensor.is_sparse:
+            def get_stride(size):
+                dim = len(size)
+                tmp = 1
+                stride = [0] * dim
+                for i in reversed(range(dim)):
+                    stride[i] = tmp
+                    tmp *= size[i]
+                return stride
+
+            x_nnz = x_tensor._nnz()
+            x_size = list(x_tensor.size())
+            x_indices = x_tensor._indices().t()
+            x_values = x_tensor._values()
+            x_stride = get_stride(x_size)
+
+            # Use .data here to get around the version check
+            x_values = x_values.data
+
+            for i in range(x_nnz):
+                x_value = x_values[i]
+                for x_idx in product(*[range(m) for m in x_values.size()[1:]]):
+                    indices = x_indices[i].tolist() + list(x_idx)
+                    d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size)))
+                    update_jacobians(x_value, x_idx, d_tensor, d_idx)
+        elif x_tensor.layout == torch._mkldnn:  # type: ignore
+            # Use .data here to get around the version check
+            x_tensor = x_tensor.data
+            if len(input) != 1:
+                raise ValueError('gradcheck currently only supports functions with 1 input, but got: ',
+                                 len(input))
+            for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
+                # this is really inefficient, but without indexing implemented, there's
+                # not really a better way than converting back and forth
+                x_tensor_dense = x_tensor.to_dense()
+                update_jacobians(x_tensor_dense, x_idx, d_tensor, d_idx, is_mkldnn=True)
+        else:
+            # Use .data here to get around the version check
+            x_tensor = x_tensor.data
+            for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
+                update_jacobians(x_tensor, x_idx, d_tensor, d_idx)
+
+    return jacobian
+
+
+def get_analytical_jacobian(input, output, nondet_tol=0.0, grad_out=1.0):
+    # it is easier to call to_dense() on the sparse output than
+    # to modify analytical jacobian
+    if output.is_sparse:
+        raise ValueError('Sparse output is not supported at gradcheck yet. '
+                         'Please call to_dense() on the output of fn for gradcheck.')
+    if output.layout == torch._mkldnn:  # type: ignore
+        raise ValueError('MKLDNN output is not supported at gradcheck yet. '
+                         'Please call to_dense() on the output of fn for gradcheck.')
+    diff_input_list = list(iter_tensors(input, True))
+    jacobian = make_jacobian(input, output.numel())
+    jacobian_reentrant = make_jacobian(input, output.numel())
+    grad_output = torch.zeros_like(output, memory_format=torch.legacy_contiguous_format)
+    flat_grad_output = grad_output.view(-1)
+    reentrant = True
+    correct_grad_sizes = True
+    correct_grad_types = True
+
+    for i in range(flat_grad_output.numel()):
+        flat_grad_output.zero_()
+        flat_grad_output[i] = grad_out
+        for jacobian_c in (jacobian, jacobian_reentrant):
+            grads_input = torch.autograd.grad(output, diff_input_list, grad_output,
+                                              retain_graph=True, allow_unused=True)
+            for jacobian_x, d_x, x in zip(jacobian_c, grads_input, diff_input_list):
+                if d_x is not None and d_x.size() != x.size():
+                    correct_grad_sizes = False
+                elif d_x is not None and d_x.dtype != x.dtype:
+                    correct_grad_types = False
+                elif jacobian_x.numel() != 0:
+                    if d_x is None:
+                        jacobian_x[:, i].zero_()
+                    else:
+                        d_x_dense = d_x.to_dense() if not d_x.layout == torch.strided else d_x
+                        assert jacobian_x[:, i].numel() == d_x_dense.numel()
+                        jacobian_x[:, i] = d_x_dense.contiguous().view(-1)
+
+    for jacobian_x, jacobian_reentrant_x in zip(jacobian, jacobian_reentrant):
+        if jacobian_x.numel() != 0 and (jacobian_x - jacobian_reentrant_x).abs().max() > nondet_tol:
+            reentrant = False
+
+    return jacobian, reentrant, correct_grad_sizes, correct_grad_types
+
+
+def _as_tuple(x):
+    if TORCH_MAJOR == 1 and TORCH_MINOR < 8:
+        b_tuple = istuple(x)
+    else:
+        b_tuple = isinstance(x, tuple)
+
+    if b_tuple:
+        return x
+    elif isinstance(x, list):
+        return tuple(x)
+    else:
+        return x,
+
+
+def _differentiable_outputs(x):
+    return tuple(o for o in _as_tuple(x) if o.requires_grad)
+
+
+# Note [VarArg of Tensors]
+# ~~~~~~~~~~~~~~~~~~~~~~~~
+# 'func' accepts a vararg of tensors, which isn't expressable in the type system at the moment.
+# If https://mypy.readthedocs.io/en/latest/additional_features.html?highlight=callable#extended-callable-types is accepted,
+# the '...' first argument of Callable can be replaced with VarArg(Tensor).
+# For now, we permit any input.
+
+def gradcheck(
+    func: Callable[..., Union[_TensorOrTensors]],  # See Note [VarArg of Tensors]
+    inputs: _TensorOrTensors,
+    eps: float = 1e-6,
+    atol: float = 1e-5,
+    rtol: float = 1e-3,
+    raise_exception: bool = True,
+    check_sparse_nnz: bool = False,
+    nondet_tol: float = 0.0,
+    check_undefined_grad: bool = True,
+    check_grad_dtypes: bool = False
+) -> bool:
+    r"""Check gradients computed via small finite differences against analytical
+    gradients w.r.t. tensors in :attr:`inputs` that are of floating point or complex type
+    and with ``requires_grad=True``.
+
+    The check between numerical and analytical gradients uses :func:`~torch.allclose`.
+
+    For complex functions, no notion of Jacobian exists. Gradcheck verifies if the numerical and
+    analytical values of Wirtinger and Conjugate Wirtinger derivative are consistent. The gradient
+    computation is done under the assumption that the overall function has a real valued output.
+    For functions with complex output, gradcheck compares the numerical and analytical gradients
+    for two values of :attr:`grad_output`: 1 and 1j. For more details, check out
+    :ref:`complex_autograd-doc`.
+
+    .. note::
+        The default values are designed for :attr:`input` of double precision.
+        This check will likely fail if :attr:`input` is of less precision, e.g.,
+        ``FloatTensor``.
+
+    .. warning::
+       If any checked tensor in :attr:`input` has overlapping memory, i.e.,
+       different indices pointing to the same memory address (e.g., from
+       :func:`torch.expand`), this check will likely fail because the numerical
+       gradients computed by point perturbation at such indices will change
+       values at all other indices that share the same memory address.
+
+    Args:
+        func (function): a Python function that takes Tensor inputs and returns
+            a Tensor or a tuple of Tensors
+        inputs (tuple of Tensor or Tensor): inputs to the function
+        eps (float, optional): perturbation for finite differences
+        atol (float, optional): absolute tolerance
+        rtol (float, optional): relative tolerance
+        raise_exception (bool, optional): indicating whether to raise an exception if
+            the check fails. The exception gives more information about the
+            exact nature of the failure. This is helpful when debugging gradchecks.
+        check_sparse_nnz (bool, optional): if True, gradcheck allows for SparseTensor input,
+            and for any SparseTensor at input, gradcheck will perform check at nnz positions only.
+        nondet_tol (float, optional): tolerance for non-determinism. When running
+            identical inputs through the differentiation, the results must either match
+            exactly (default, 0.0) or be within this tolerance.
+        check_undefined_grad (bool, optional): if True, check if undefined output grads
+            are supported and treated as zeros, for ``Tensor`` outputs.
+
+    Returns:
+        True if all differences satisfy allclose condition
+    """
+    def fail_test(msg):
+        if raise_exception:
+            raise RuntimeError(msg)
+        return False
+
+    tupled_inputs = _as_tuple(inputs)
+    if not check_sparse_nnz and any(t.is_sparse for t in tupled_inputs if isinstance(t, torch.Tensor)):
+        return fail_test('gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False.')
+
+    # Make sure that gradients are saved for at least one input
+    any_input_requiring_grad = False
+    for idx, inp in enumerate(tupled_inputs):
+        if is_tensor_like(inp) and inp.requires_grad:
+            if not (inp.dtype == torch.float64 or inp.dtype == torch.complex128):
+                warnings.warn(
+                    f'Input #{idx} requires gradient and '
+                    'is not a double precision floating point or complex. '
+                    'This check will likely fail if all the inputs are '
+                    'not of double precision floating point or complex. ')
+            content = inp._values() if inp.is_sparse else inp
+            # TODO: To cover more problematic cases, replace stride = 0 check with
+            # "any overlap in memory" once we have a proper function to check it.
+            if content.layout is not torch._mkldnn:  # type: ignore
+                if not all(st > 0 or sz <= 1 for st, sz in zip(content.stride(), content.size())):
+                    raise RuntimeError(
+                        'The {}th input has a dimension with stride 0. gradcheck only '
+                        'supports inputs that are non-overlapping to be able to '
+                        'compute the numerical gradients correctly. You should call '
+                        '.contiguous on the input before passing it to gradcheck.'.format(idx))
+            any_input_requiring_grad = True
+            inp.retain_grad()
+    if not any_input_requiring_grad:
+        raise ValueError(
+            'gradcheck expects at least one input tensor to require gradient, '
+            'but none of them have requires_grad=True.')
+
+    func_out = func(*tupled_inputs)
+    output = _differentiable_outputs(func_out)
+
+    if not output:
+        for i, o in enumerate(func_out):
+            def fn(input):
+                return _as_tuple(func(*input))[i]
+            numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps)
+            for n in numerical:
+                if torch.ne(n, 0).sum() > 0:
+                    return fail_test('Numerical gradient for function expected to be zero')
+        return True
+
+    for i, o in enumerate(output):
+        if not o.requires_grad:
+            continue
+
+        def fn(input):
+            return _as_tuple(func(*input))[i]
+
+        analytical, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian(tupled_inputs,
+                                                                                                o,
+                                                                                                nondet_tol=nondet_tol)
+        numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps)
+
+        # NOTE: this early return hands the raw (analytical, numerical) jacobians back
+        # to the caller; the torch.autograd.gradcheck logic below it is unreachable.
+        return analytical, numerical
+
+        out_is_complex = o.is_complex()
+
+        if out_is_complex:
+            # analytical vjp with grad_out = 1.0j
+            analytical_with_imag_grad_out, reentrant_with_imag_grad_out, \
+                correct_grad_sizes_with_imag_grad_out, correct_grad_types_with_imag_grad_out \
+                = get_analytical_jacobian(tupled_inputs, o, nondet_tol=nondet_tol, grad_out=1j)
+            numerical_with_imag_grad_out = get_numerical_jacobian(fn, tupled_inputs, eps=eps, grad_out=1j)
+
+        if not correct_grad_types and check_grad_dtypes:
+            return fail_test('Gradient has dtype mismatch')
+
+        if out_is_complex and not correct_grad_types_with_imag_grad_out and check_grad_dtypes:
+            return fail_test('Gradient (calculated using complex valued grad output) has dtype mismatch')
+
+        if not correct_grad_sizes:
+            return fail_test('Analytical gradient has incorrect size')
+
+        if out_is_complex and not correct_grad_sizes_with_imag_grad_out:
+            return fail_test('Analytical gradient (calculated using complex valued grad output) has incorrect size')
+
+        def checkIfNumericalAnalyticAreClose(a, n, j, error_str=''):
+            if not torch.allclose(a, n, rtol, atol):
+                return fail_test(error_str + 'Jacobian mismatch for output %d with respect to input %d,\n'
+                                 'numerical:%s\nanalytical:%s\n' % (i, j, n, a))
+
+        inp_tensors = iter_tensors(tupled_inputs, True)
+
+        for j, (a, n, inp) in enumerate(zip(analytical, numerical, inp_tensors)):
+            if a.numel() != 0 or n.numel() != 0:
+                if o.is_complex():
+                    # C -> C, R -> C
+                    a_with_imag_grad_out = analytical_with_imag_grad_out[j]
+                    n_with_imag_grad_out = numerical_with_imag_grad_out[j]
+                    checkIfNumericalAnalyticAreClose(a_with_imag_grad_out, n_with_imag_grad_out, j,
+                                                     "Gradients failed to compare equal for grad output = 1j. ")
+                if inp.is_complex():
+                    # C -> R, C -> C
+                    checkIfNumericalAnalyticAreClose(a, n, j,
+                                                     "Gradients failed to compare equal for grad output = 1. ")
+                else:
+                    # R -> R, R -> C
+                    checkIfNumericalAnalyticAreClose(a, n, j)
+
+
+        def not_reentrant_error(error_str=''):
+            error_msg = "Backward" + error_str + " is not reentrant, i.e., running backward with same \
+                        input and grad_output multiple times gives different values, \
+                        although analytical gradient matches numerical gradient. \
+                        The tolerance for nondeterminism was {}.".format(nondet_tol)
+            return fail_test(error_msg)
+
+        if not reentrant:
+            return not_reentrant_error()
+
+        if out_is_complex and not reentrant_with_imag_grad_out:
+            return not_reentrant_error(' (calculated using complex valued grad output)')
+
+    # check if the backward multiplies by grad_output
+    output = _differentiable_outputs(func(*tupled_inputs))
+    if any([o.requires_grad for o in output]):
+        diff_input_list: List[torch.Tensor] = list(iter_tensors(tupled_inputs, True))
+        if not diff_input_list:
+            raise RuntimeError("no Tensors requiring grad found in input")
+        grads_input = torch.autograd.grad(output, diff_input_list,
+                                          [torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in output],
+                                          allow_unused=True)
+        for gi, di in zip(grads_input, diff_input_list):
+            if gi is None:
+                continue
+            if isinstance(gi, torch.Tensor) and gi.layout != torch.strided:
+                if gi.layout != di.layout:
+                    return fail_test('grad is incorrect layout (' + str(gi.layout) + ' is not ' + str(di.layout) + ')')
+                if gi.layout == torch.sparse_coo:
+                    if gi.sparse_dim() != di.sparse_dim():
+                        return fail_test('grad is sparse tensor, but has incorrect sparse_dim')
+                    if gi.dense_dim() != di.dense_dim():
+                        return fail_test('grad is sparse tensor, but has incorrect dense_dim')
+                gi = gi.to_dense()
+                di = di.to_dense()
+            if not gi.eq(0).all():
+                return fail_test('backward not multiplied by grad_output')
+            if gi.dtype != di.dtype or gi.device != di.device or gi.is_sparse != di.is_sparse:
+                return fail_test("grad is incorrect type")
+            if gi.size() != di.size():
+                return fail_test('grad is incorrect size')
+
+        if check_undefined_grad:
+            def warn_bc_breaking():
+                warnings.warn((
+                    'Backwards compatibility: New undefined gradient support checking '
+                    'feature is enabled by default, but it may break existing callers '
+                    'of this function. If this is true for you, you can call this '
         | 
| 455 | 
            +
                                'function with "check_undefined_grad=False" to disable the feature'))
         | 
| 456 | 
            +
             | 
| 457 | 
            +
                        def check_undefined_grad_support(output_to_check):
         | 
| 458 | 
            +
                            grads_output = [torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in output_to_check]
         | 
| 459 | 
            +
                            try:
         | 
| 460 | 
            +
                                grads_input = torch.autograd.grad(output_to_check,
         | 
| 461 | 
            +
                                                                  diff_input_list,
         | 
| 462 | 
            +
                                                                  grads_output,
         | 
| 463 | 
            +
                                                                  allow_unused=True)
         | 
| 464 | 
            +
                            except RuntimeError:
         | 
| 465 | 
            +
                                warn_bc_breaking()
         | 
| 466 | 
            +
                                return fail_test((
         | 
| 467 | 
            +
                                    'Expected backward function to handle undefined output grads. '
         | 
| 468 | 
            +
                                    'Please look at "Notes about undefined output gradients" in '
         | 
| 469 | 
            +
                                    '"tools/autograd/derivatives.yaml"'))
         | 
| 470 | 
            +
             | 
| 471 | 
            +
                            for gi, i in zip(grads_input, diff_input_list):
         | 
| 472 | 
            +
                                if (gi is not None) and (not gi.eq(0).all()):
         | 
| 473 | 
            +
                                    warn_bc_breaking()
         | 
| 474 | 
            +
                                    return fail_test((
         | 
| 475 | 
            +
                                        'Expected all input grads to be undefined or zero when all output grads are undefined '
         | 
| 476 | 
            +
                                        'or zero. Please look at "Notes about undefined output gradients" in '
         | 
| 477 | 
            +
                                        '"tools/autograd/derivatives.yaml"'))
         | 
| 478 | 
            +
                            return True
         | 
| 479 | 
            +
             | 
| 480 | 
            +
                        # All backward functions must work properly if all output grads are undefined
         | 
| 481 | 
            +
                        outputs_to_check = [[
         | 
| 482 | 
            +
                            torch._C._functions.UndefinedGrad()(o) for o in _differentiable_outputs(func(*tupled_inputs))
         | 
| 483 | 
            +
                            # This check filters out Tensor-likes that aren't instances of Tensor.
         | 
| 484 | 
            +
                            if isinstance(o, torch.Tensor)
         | 
| 485 | 
            +
                        ]]
         | 
| 486 | 
            +
             | 
| 487 | 
            +
                        # If there are multiple output grads, we should be able to undef one at a time without error
         | 
| 488 | 
            +
                        if len(outputs_to_check[0]) > 1:
         | 
| 489 | 
            +
                            for undef_grad_idx in range(len(output)):
         | 
| 490 | 
            +
                                output_to_check = _differentiable_outputs(func(*tupled_inputs))
         | 
| 491 | 
            +
                                outputs_to_check.append([
         | 
| 492 | 
            +
                                    torch._C._functions.UndefinedGrad()(o) if idx == undef_grad_idx else o
         | 
| 493 | 
            +
                                    for idx, o in enumerate(output_to_check)])
         | 
| 494 | 
            +
             | 
| 495 | 
            +
                        for output_to_check in outputs_to_check:
         | 
| 496 | 
            +
                            if not check_undefined_grad_support(output_to_check):
         | 
| 497 | 
            +
                                return False
         | 
| 498 | 
            +
             | 
| 499 | 
            +
                return True
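
(Editor's aside, not part of the diff: the zero-grad_output probe above, "backward not multiplied by grad_output", is easiest to see in isolation. A minimal sketch using only stock PyTorch; BadIdentity is a hypothetical op invented for illustration. A backward that ignores its incoming gradient returns nonzero values even when gradcheck feeds all-zero grad_outputs, which is exactly what the check catches.)

import torch

class BadIdentity(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        return x.clone()

    @staticmethod
    def backward(ctx, grad):
        # deliberately wrong: ignores grad, so backward of zeros is not zeros
        return torch.ones_like(grad)

x = torch.randn(3, dtype=torch.double, requires_grad=True)
# with raise_exception=False, gradcheck returns False instead of raising
assert not torch.autograd.gradcheck(BadIdentity.apply, (x,), raise_exception=False)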

def gradgradcheck(
    func: Callable[..., _TensorOrTensors],  # See Note [VarArg of Tensors]
    inputs: _TensorOrTensors,
    grad_outputs: Optional[_TensorOrTensors] = None,
    eps: float = 1e-6,
    atol: float = 1e-5,
    rtol: float = 1e-3,
    gen_non_contig_grad_outputs: bool = False,
    raise_exception: bool = True,
    nondet_tol: float = 0.0,
    check_undefined_grad: bool = True,
    check_grad_dtypes: bool = False
) -> bool:
    r"""Check gradients of gradients computed via small finite differences
    against analytical gradients w.r.t. tensors in :attr:`inputs` and
    :attr:`grad_outputs` that are of floating point or complex type and with
    ``requires_grad=True``.

    This function checks that backpropagating through the gradients computed
    to the given :attr:`grad_outputs` is correct.

    The check between numerical and analytical gradients uses :func:`~torch.allclose`.

    .. note::
        The default values are designed for :attr:`input` and
        :attr:`grad_outputs` of double precision. This check will likely fail if
        they are of less precision, e.g., ``FloatTensor``.

    .. warning::
       If any checked tensor in :attr:`input` and :attr:`grad_outputs` has
       overlapping memory, i.e., different indices pointing to the same memory
       address (e.g., from :func:`torch.expand`), this check will likely fail
       because the numerical gradients computed by point perturbation at such
       indices will change values at all other indices that share the same
       memory address.

    Args:
        func (function): a Python function that takes Tensor inputs and returns
            a Tensor or a tuple of Tensors
        inputs (tuple of Tensor or Tensor): inputs to the function
        grad_outputs (tuple of Tensor or Tensor, optional): The gradients with
            respect to the function's outputs.
        eps (float, optional): perturbation for finite differences
        atol (float, optional): absolute tolerance
        rtol (float, optional): relative tolerance
        gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is
            ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the
            randomly generated gradient outputs are made to be noncontiguous
        raise_exception (bool, optional): indicating whether to raise an exception if
            the check fails. The exception gives more information about the
            exact nature of the failure. This is helpful when debugging gradchecks.
        nondet_tol (float, optional): tolerance for non-determinism. When running
            identical inputs through the differentiation, the results must either match
            exactly (default, 0.0) or be within this tolerance. Note that a small amount
            of nondeterminism in the gradient will lead to larger inaccuracies in
            the second derivative.
        check_undefined_grad (bool, optional): if True, check if undefined output grads
            are supported and treated as zeros

    Returns:
        True if all differences satisfy allclose condition
    """
    tupled_inputs = _as_tuple(inputs)

    if grad_outputs is None:
        # If grad_outputs is not specified, create random Tensors of the same
        # shape, type, and device as the outputs
        def randn_like(x):
            y = torch.testing.randn_like(
                x if (x.is_floating_point() or x.is_complex()) else x.double(), memory_format=torch.legacy_contiguous_format)
            if gen_non_contig_grad_outputs:
                y = torch.testing.make_non_contiguous(y)
            return y.requires_grad_()
        outputs = _as_tuple(func(*tupled_inputs))
        tupled_grad_outputs = tuple(randn_like(x) for x in outputs)
    else:
        tupled_grad_outputs = _as_tuple(grad_outputs)

    num_outputs = len(tupled_grad_outputs)

    def new_func(*args):
        input_args = args[:-num_outputs]
        grad_outputs = args[-num_outputs:]
        outputs = _differentiable_outputs(func(*input_args))
        input_args = tuple(x for x in input_args if isinstance(x, torch.Tensor) and x.requires_grad)
        grad_inputs = torch.autograd.grad(outputs, input_args, grad_outputs, create_graph=True)
        return grad_inputs

    return gradcheck(new_func, tupled_inputs + tupled_grad_outputs, eps, atol, rtol, raise_exception,
                     nondet_tol=nondet_tol, check_undefined_grad=check_undefined_grad,
                     check_grad_dtypes=check_grad_dtypes)
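
(Editor's aside, not part of the diff: for reference, a minimal call pattern for this entry point. A sketch using the upstream torch.autograd.gradgradcheck, which this vendored copy mirrors; double-precision inputs, per the note in the docstring.)

import torch
from torch.autograd import gradgradcheck

x = torch.randn(4, dtype=torch.double, requires_grad=True)

# verifies the double backward of sin against finite differences;
# random grad_outputs are generated since none are passed
assert gradgradcheck(lambda t: t.sin(), (x,))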
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/group_ops.py
    ADDED
    
@@ -0,0 +1,102 @@
import lietorch_backends
import torch
import torch.nn.functional as F


class GroupOp(torch.autograd.Function):
    """ group operation base class """

    @classmethod
    def forward(cls, ctx, group_id, *inputs):
        ctx.group_id = group_id
        ctx.save_for_backward(*inputs)
        out = cls.forward_op(ctx.group_id, *inputs)
        return out

    @classmethod
    def backward(cls, ctx, grad):
        error_str = "Backward operation not implemented for {}".format(cls)
        assert cls.backward_op is not None, error_str

        inputs = ctx.saved_tensors
        grad = grad.contiguous()
        grad_inputs = cls.backward_op(ctx.group_id, grad, *inputs)
        return (None, ) + tuple(grad_inputs)


class Exp(GroupOp):
    """ exponential map """
    forward_op, backward_op = lietorch_backends.expm, lietorch_backends.expm_backward

class Log(GroupOp):
    """ logarithm map """
    forward_op, backward_op = lietorch_backends.logm, lietorch_backends.logm_backward

class Inv(GroupOp):
    """ group inverse """
    forward_op, backward_op = lietorch_backends.inv, lietorch_backends.inv_backward

class Mul(GroupOp):
    """ group multiplication """
    forward_op, backward_op = lietorch_backends.mul, lietorch_backends.mul_backward

class Adj(GroupOp):
    """ adjoint operator """
    forward_op, backward_op = lietorch_backends.adj, lietorch_backends.adj_backward

class AdjT(GroupOp):
    """ transposed adjoint operator """
    forward_op, backward_op = lietorch_backends.adjT, lietorch_backends.adjT_backward

class Act3(GroupOp):
    """ action on 3D point """
    forward_op, backward_op = lietorch_backends.act, lietorch_backends.act_backward

class Act4(GroupOp):
    """ action on homogeneous (4D) point """
    forward_op, backward_op = lietorch_backends.act4, lietorch_backends.act4_backward

class Jinv(GroupOp):
    """ inverse left-Jacobian operator (no backward implemented) """
    forward_op, backward_op = lietorch_backends.Jinv, None

class ToMatrix(GroupOp):
    """ convert to matrix representation (no backward implemented) """
    forward_op, backward_op = lietorch_backends.as_matrix, None


### conversion operations to/from Euclidean embeddings ###

class FromVec(torch.autograd.Function):
    """ convert vector into group object """

    @classmethod
    def forward(cls, ctx, group_id, *inputs):
        ctx.group_id = group_id
        ctx.save_for_backward(*inputs)
        return inputs[0]

    @classmethod
    def backward(cls, ctx, grad):
        inputs = ctx.saved_tensors
        J = lietorch_backends.projector(ctx.group_id, *inputs)
        return None, torch.matmul(grad.unsqueeze(-2), torch.linalg.pinv(J)).squeeze(-2)

class ToVec(torch.autograd.Function):
    """ convert group object to vector """

    @classmethod
    def forward(cls, ctx, group_id, *inputs):
        ctx.group_id = group_id
        ctx.save_for_backward(*inputs)
        return inputs[0]

    @classmethod
    def backward(cls, ctx, grad):
        inputs = ctx.saved_tensors
        J = lietorch_backends.projector(ctx.group_id, *inputs)
        return None, torch.matmul(grad.unsqueeze(-2), J).squeeze(-2)
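
(Editor's aside, not part of the diff: the GroupOp pattern above just routes autograd through a paired forward/backward implemented in the compiled lietorch_backends extension. A pure-Python stand-in shows the same shape and can be verified the same way; Square is a hypothetical op with the backend calls replaced by plain tensor math.)

import torch

class Square(torch.autograd.Function):
    """ explicit forward/backward pair, mirroring GroupOp """

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * x

    @staticmethod
    def backward(ctx, grad):
        # like GroupOp.backward: recover saved inputs, apply the paired op
        x, = ctx.saved_tensors
        return 2.0 * x * grad.contiguous()

x = torch.randn(5, dtype=torch.double, requires_grad=True)
assert torch.autograd.gradcheck(Square.apply, (x,), eps=1e-6, atol=1e-6)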
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/groups.py
    ADDED
    
@@ -0,0 +1,322 @@
import torch
import numpy as np

# group operations implemented in cuda
from .group_ops import Exp, Log, Inv, Mul, Adj, AdjT, Jinv, Act3, Act4, ToMatrix, ToVec, FromVec
from .broadcasting import broadcast_inputs


class LieGroupParameter(torch.Tensor):
    """ Wrapper class for LieGroup """

    from torch._C import _disabled_torch_function_impl
    __torch_function__ = _disabled_torch_function_impl

    def __new__(cls, group, requires_grad=True):
        data = torch.zeros(group.tangent_shape,
                           device=group.data.device,
                           dtype=group.data.dtype,
                           requires_grad=True)

        return torch.Tensor._make_subclass(cls, data, requires_grad)

    def __init__(self, group):
        self.group = group

    def retr(self):
        return self.group.retr(self)

    def log(self):
        return self.retr().log()

    def inv(self):
        return self.retr().inv()

    def adj(self, a):
        return self.retr().adj(a)

    def __mul__(self, other):
        if isinstance(other, LieGroupParameter):
            return self.retr() * other.retr()
        else:
            return self.retr() * other

    def add_(self, update, alpha):
        self.group = self.group.exp(alpha*update) * self.group

    def __getitem__(self, index):
        return self.retr().__getitem__(index)


class LieGroup:
    """ Base class for Lie Group """

    def __init__(self, data):
        self.data = data

    def __repr__(self):
        return "{}: size={}, device={}, dtype={}".format(
            self.group_name, self.shape, self.device, self.dtype)

    @property
    def shape(self):
        return self.data.shape[:-1]

    @property
    def device(self):
        return self.data.device

    @property
    def dtype(self):
        return self.data.dtype

    def vec(self):
        return self.apply_op(ToVec, self.data)

    @property
    def tangent_shape(self):
        return self.data.shape[:-1] + (self.manifold_dim,)

    @classmethod
    def Identity(cls, *batch_shape, **kwargs):
        """ Construct identity element with batch shape """

        if isinstance(batch_shape[0], tuple):
            batch_shape = batch_shape[0]

        elif isinstance(batch_shape[0], list):
            batch_shape = tuple(batch_shape[0])

        numel = np.prod(batch_shape)
        data = cls.id_elem.reshape(1,-1)

        if 'device' in kwargs:
            data = data.to(kwargs['device'])

        if 'dtype' in kwargs:
            data = data.type(kwargs['dtype'])

        data = data.repeat(numel, 1)
        return cls(data).view(batch_shape)

    @classmethod
    def IdentityLike(cls, G):
        return cls.Identity(G.shape, device=G.data.device, dtype=G.data.dtype)

    @classmethod
    def InitFromVec(cls, data):
        return cls(cls.apply_op(FromVec, data))

    @classmethod
    def Random(cls, *batch_shape, sigma=1.0, **kwargs):
        """ Construct random element with batch_shape by random sampling in tangent space """

        if isinstance(batch_shape[0], tuple):
            batch_shape = batch_shape[0]

        elif isinstance(batch_shape[0], list):
            batch_shape = tuple(batch_shape[0])

        tangent_shape = batch_shape + (cls.manifold_dim,)
        xi = torch.randn(tangent_shape, **kwargs)
        return cls.exp(sigma * xi)

    @classmethod
    def apply_op(cls, op, x, y=None):
        """ Apply group operator """
        inputs, out_shape = broadcast_inputs(x, y)

        data = op.apply(cls.group_id, *inputs)
        return data.view(out_shape + (-1,))

    @classmethod
    def exp(cls, x):
        """ exponential map: x -> X """
        return cls(cls.apply_op(Exp, x))

    def quaternion(self):
        """ extract quaternion """
        # NOTE: Quat is not defined in group_ops.py above; calling this
        # method as shipped would raise a NameError.
        return self.apply_op(Quat, self.data)

    def log(self):
        """ logarithm map """
        return self.apply_op(Log, self.data)

    def inv(self):
        """ group inverse """
        return self.__class__(self.apply_op(Inv, self.data))

    def mul(self, other):
        """ group multiplication """
        return self.__class__(self.apply_op(Mul, self.data, other.data))

    def retr(self, a):
        """ retraction: Exp(a) * X """
        dX = self.__class__.apply_op(Exp, a)
        return self.__class__(self.apply_op(Mul, dX, self.data))

    def adj(self, a):
        """ adjoint operator: b = A(X) * a """
        return self.apply_op(Adj, self.data, a)

    def adjT(self, a):
        """ transposed adjoint operator: b = a * A(X) """
        return self.apply_op(AdjT, self.data, a)

    def Jinv(self, a):
        return self.apply_op(Jinv, self.data, a)

    def act(self, p):
        """ action on a point cloud """

        # action on point
        if p.shape[-1] == 3:
            return self.apply_op(Act3, self.data, p)

        # action on homogeneous point
        elif p.shape[-1] == 4:
            return self.apply_op(Act4, self.data, p)

    def matrix(self):
        """ convert element to 4x4 matrix """
        I = torch.eye(4, dtype=self.dtype, device=self.device)
        I = I.view([1] * (len(self.data.shape) - 1) + [4, 4])
        return self.__class__(self.data[...,None,:]).act(I).transpose(-1,-2)

    def translation(self):
        """ extract translation component """
        p = torch.as_tensor([0.0, 0.0, 0.0, 1.0], dtype=self.dtype, device=self.device)
        p = p.view([1] * (len(self.data.shape) - 1) + [4,])
        return self.apply_op(Act4, self.data, p)

    def detach(self):
        return self.__class__(self.data.detach())

    def view(self, dims):
        data_reshaped = self.data.view(dims + (self.embedded_dim,))
        return self.__class__(data_reshaped)

    def __mul__(self, other):
        # group multiplication
        if isinstance(other, LieGroup):
            return self.mul(other)

        # action on point
        elif isinstance(other, torch.Tensor):
            return self.act(other)

    def __getitem__(self, index):
        return self.__class__(self.data[index])

    def __setitem__(self, index, item):
        self.data[index] = item.data

    def to(self, *args, **kwargs):
        return self.__class__(self.data.to(*args, **kwargs))

    def cpu(self):
        return self.__class__(self.data.cpu())

    def cuda(self):
        return self.__class__(self.data.cuda())

    def float(self):
        return self.__class__(self.data.float())

    def double(self):
        return self.__class__(self.data.double())

    def unbind(self, dim=0):
        return [self.__class__(x) for x in self.data.unbind(dim=dim)]


class SO3(LieGroup):
    group_name = 'SO3'
    group_id = 1
    manifold_dim = 3
    embedded_dim = 4

    # unit quaternion
    id_elem = torch.as_tensor([0.0, 0.0, 0.0, 1.0])

    def __init__(self, data):
        if isinstance(data, SE3):
            data = data.data[..., 3:7]

        super(SO3, self).__init__(data)


class RxSO3(LieGroup):
    group_name = 'RxSO3'
    group_id = 2
    manifold_dim = 4
    embedded_dim = 5

    # unit quaternion, scale
    id_elem = torch.as_tensor([0.0, 0.0, 0.0, 1.0, 1.0])

    def __init__(self, data):
        if isinstance(data, Sim3):
            data = data.data[..., 3:8]

        super(RxSO3, self).__init__(data)


class SE3(LieGroup):
    group_name = 'SE3'
    group_id = 3
    manifold_dim = 6
    embedded_dim = 7

    # translation, unit quaternion
    id_elem = torch.as_tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0])

    def __init__(self, data):
        if isinstance(data, SO3):
            translation = torch.zeros_like(data.data[...,:3])
            data = torch.cat([translation, data.data], -1)

        super(SE3, self).__init__(data)

    def scale(self, s):
        t, q = self.data.split([3,4], -1)
        t = t * s.unsqueeze(-1)
        return SE3(torch.cat([t, q], dim=-1))


class Sim3(LieGroup):
    group_name = 'Sim3'
    group_id = 4
    manifold_dim = 7
    embedded_dim = 8

    # translation, unit quaternion, scale
    id_elem = torch.as_tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0])

    def __init__(self, data):

        if isinstance(data, SO3):
            # fixed: read from the SO3 instance's data (was `SO3.data`, which
            # would raise an AttributeError on the class object)
            scale = torch.ones_like(data.data[...,:1])
            translation = torch.zeros_like(data.data[...,:3])
            data = torch.cat([translation, data.data, scale], -1)

        elif isinstance(data, SE3):
            scale = torch.ones_like(data.data[...,:1])
            data = torch.cat([data.data, scale], -1)

        elif isinstance(data, Sim3):
            data = data.data

        super(Sim3, self).__init__(data)


def cat(group_list, dim):
    """ Concatenate groups along dimension """
    data = torch.cat([X.data for X in group_list], dim=dim)
    return group_list[0].__class__(data)

def stack(group_list, dim):
    """ Stack groups along a new dimension """
    data = torch.stack([X.data for X in group_list], dim=dim)
    return group_list[0].__class__(data)
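
(Editor's aside, not part of the diff: a short usage sketch tying groups.py to the ops above. It assumes the compiled lietorch_backends extension and a CUDA device, as in run_tests.py below; the import follows that file's convention. The two asserts mirror its exp-log and act tests.)

import torch
from lietorch import SE3

# sample a batch of rigid transforms in the 6-dof tangent space
xi = 0.2 * torch.randn(8, SE3.manifold_dim, device='cuda').double()
T = SE3.exp(xi)

# log is the inverse of exp for small tangent vectors
assert torch.allclose(T.log(), xi, atol=1e-8)

# acting on 3D points agrees with the 4x4 matrix form
p = torch.randn(8, 3, device='cuda').double()
p_hom = torch.cat([p, torch.ones_like(p[..., :1])], dim=-1)
p_mat = torch.matmul(T.matrix(), p_hom[..., None])[..., 0]
assert torch.allclose(T.act(p), p_mat[..., :3], atol=1e-8)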
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/run_tests.py
    ADDED
    
@@ -0,0 +1,302 @@
| 1 | 
            +
            import torch
         | 
| 2 | 
            +
            import lietorch
         | 
| 3 | 
            +
             | 
| 4 | 
            +
from lietorch import SO3, RxSO3, SE3, Sim3
from gradcheck import gradcheck, get_analytical_jacobian


### forward tests ###

def make_homogeneous(p):
    return torch.cat([p, torch.ones_like(p[...,:1])], dim=-1)

def matv(A, b):
    return torch.matmul(A, b[...,None])[..., 0]

def test_exp_log(Group, device='cuda'):
    """ check Log(Exp(x)) == x """
    a = .2*torch.randn(2,3,4,5,6,7,Group.manifold_dim, device=device).double()
    b = Group.exp(a).log()
    assert torch.allclose(a, b, atol=1e-8), "should be identity"
    print("\t-", Group, "Passed exp-log test")

def test_inv(Group, device='cuda'):
    """ check Log(X * X^{-1}) == 0 """
    X = Group.exp(.1*torch.randn(2,3,4,5,Group.manifold_dim, device=device).double())
    a = (X * X.inv()).log()
    assert torch.allclose(a, torch.zeros_like(a), atol=1e-8), "should be 0"
    print("\t-", Group, "Passed inv test")

def test_adj(Group, device='cuda'):
    """ check X * Exp(a) == Exp(Adj(X,a)) * X """
    X = Group.exp(torch.randn(2,3,4,5, Group.manifold_dim, device=device).double())
    a = torch.randn(2,3,4,5, Group.manifold_dim, device=device).double()

    b = X.adj(a)
    Y1 = X * Group.exp(a)
    Y2 = Group.exp(b) * X

    c = (Y1 * Y2.inv()).log()
    assert torch.allclose(c, torch.zeros_like(c), atol=1e-8), "should be 0"
    print("\t-", Group, "Passed adj test")


def test_act(Group, device='cuda'):
    """ check that act() matches multiplication by the 4x4 matrix """
    X = Group.exp(torch.randn(1, Group.manifold_dim, device=device).double())
    p = torch.randn(1,3,device=device).double()

    p1 = X.act(p)
    p2 = matv(X.matrix(), make_homogeneous(p))

    assert torch.allclose(p1, p2[...,:3], atol=1e-8), "should be equal"
    print("\t-", Group, "Passed act test")

### backward tests ###
def test_exp_log_grad(Group, device='cuda', tol=1e-8):

    D = Group.manifold_dim

    def fn(a):
        return Group.exp(a).log()

    a = torch.zeros(1, Group.manifold_dim, requires_grad=True, device=device).double()
    analytical, reentrant, correct_grad_sizes, correct_grad_types = \
        get_analytical_jacobian((a,), fn(a))

    assert torch.allclose(analytical[0], torch.eye(D, device=device).double(), atol=tol)

    a = .2 * torch.randn(1, Group.manifold_dim, requires_grad=True, device=device).double()
    analytical, reentrant, correct_grad_sizes, correct_grad_types = \
        get_analytical_jacobian((a,), fn(a))

    assert torch.allclose(analytical[0], torch.eye(D, device=device).double(), atol=tol)

    print("\t-", Group, "Passed eye-grad test")


def test_inv_log_grad(Group, device='cuda', tol=1e-8):

    D = Group.manifold_dim
    X = Group.exp(.2*torch.randn(1, D, device=device).double())

    def fn(a):
        return (Group.exp(a) * X).inv().log()

    a = torch.zeros(1, D, requires_grad=True, device=device).double()
    analytical, numerical = gradcheck(fn, [a], eps=1e-4)

    # assert torch.allclose(analytical[0], numerical[0], atol=tol)
    if not torch.allclose(analytical[0], numerical[0], atol=tol):
        print(analytical[0])
        print(numerical[0])

    print("\t-", Group, "Passed inv-grad test")


def test_adj_grad(Group, device='cuda'):
    D = Group.manifold_dim
    X = Group.exp(.5*torch.randn(1, D, device=device).double())

    def fn(a, b):
        return (Group.exp(a) * X).adj(b)

    a = torch.zeros(1, D, requires_grad=True, device=device).double()
    b = torch.randn(1, D, requires_grad=True, device=device).double()

    analytical, numerical = gradcheck(fn, [a, b], eps=1e-4)
    assert torch.allclose(analytical[0], numerical[0], atol=1e-8)
    assert torch.allclose(analytical[1], numerical[1], atol=1e-8)

    print("\t-", Group, "Passed adj-grad test")


def test_adjT_grad(Group, device='cuda'):
    D = Group.manifold_dim
    X = Group.exp(.5*torch.randn(1, D, device=device).double())

    def fn(a, b):
        return (Group.exp(a) * X).adjT(b)

    a = torch.zeros(1, D, requires_grad=True, device=device).double()
    b = torch.randn(1, D, requires_grad=True, device=device).double()

    analytical, numerical = gradcheck(fn, [a, b], eps=1e-4)

    assert torch.allclose(analytical[0], numerical[0], atol=1e-8)
    assert torch.allclose(analytical[1], numerical[1], atol=1e-8)

    print("\t-", Group, "Passed adjT-grad test")


def test_act_grad(Group, device='cuda'):
    D = Group.manifold_dim
    X = Group.exp(5*torch.randn(1, D, device=device).double())

    def fn(a, b):
        return (X*Group.exp(a)).act(b)

    a = torch.zeros(1, D, requires_grad=True, device=device).double()
    b = torch.randn(1, 3, requires_grad=True, device=device).double()

    analytical, numerical = gradcheck(fn, [a, b], eps=1e-4)

    assert torch.allclose(analytical[0], numerical[0], atol=1e-8)
    assert torch.allclose(analytical[1], numerical[1], atol=1e-8)

    print("\t-", Group, "Passed act-grad test")

def test_matrix_grad(Group, device='cuda'):
    D = Group.manifold_dim
    X = Group.exp(torch.randn(1, D, device=device).double())

    def fn(a):
        return (Group.exp(a) * X).matrix()

    a = torch.zeros(1, D, requires_grad=True, device=device).double()
    analytical, numerical = gradcheck(fn, [a], eps=1e-4)
    assert torch.allclose(analytical[0], numerical[0], atol=1e-6)

    print("\t-", Group, "Passed matrix-grad test")


def extract_translation_grad(Group, device='cuda'):
    """ prototype function """

    D = Group.manifold_dim
    X = Group.exp(5*torch.randn(1, D, device=device).double())

    def fn(a):
        return (Group.exp(a)*X).translation()

    a = torch.zeros(1, D, requires_grad=True, device=device).double()

    analytical, numerical = gradcheck(fn, [a], eps=1e-4)

    assert torch.allclose(analytical[0], numerical[0], atol=1e-8)
    print("\t-", Group, "Passed translation grad test")


def test_vec_grad(Group, device='cuda', tol=1e-6):

    D = Group.manifold_dim
    X = Group.exp(5*torch.randn(1, D, device=device).double())

    def fn(a):
        return (Group.exp(a)*X).vec()

    a = torch.zeros(1, D, requires_grad=True, device=device).double()

    analytical, numerical = gradcheck(fn, [a], eps=1e-4)

    assert torch.allclose(analytical[0], numerical[0], atol=tol)
    print("\t-", Group, "Passed tovec grad test")


def test_fromvec_grad(Group, device='cuda', tol=1e-6):

    def fn(a):
        # normalize the quaternion (and exponentiate the scale) so the
        # embedding is a valid group element before calling InitFromVec
        if Group == SO3:
            a = a / a.norm(dim=-1, keepdim=True)

        elif Group == RxSO3:
            q, s = a.split([4, 1], dim=-1)
            q = q / q.norm(dim=-1, keepdim=True)
            a = torch.cat([q, s.exp()], dim=-1)

        elif Group == SE3:
            t, q = a.split([3, 4], dim=-1)
            q = q / q.norm(dim=-1, keepdim=True)
            a = torch.cat([t, q], dim=-1)

        elif Group == Sim3:
            t, q, s = a.split([3, 4, 1], dim=-1)
            q = q / q.norm(dim=-1, keepdim=True)
            a = torch.cat([t, q, s.exp()], dim=-1)

        return Group.InitFromVec(a).vec()

    D = Group.embedded_dim
    a = torch.randn(1, 2, D, requires_grad=True, device=device).double()

    analytical, numerical = gradcheck(fn, [a], eps=1e-4)

    assert torch.allclose(analytical[0], numerical[0], atol=tol)
    print("\t-", Group, "Passed fromvec grad test")


def scale(device='cuda'):

    def fn(a, s):
        X = SE3.exp(a)
        X.scale(s)
        return X.log()

    s = torch.rand(1, requires_grad=True, device=device).double()
    a = torch.randn(1, 6, requires_grad=True, device=device).double()

    analytical, numerical = gradcheck(fn, [a, s], eps=1e-3)
    print(analytical[1])
    print(numerical[1])

    assert torch.allclose(analytical[0], numerical[0], atol=1e-8)
    assert torch.allclose(analytical[1], numerical[1], atol=1e-8)

    print("\t-", "Passed se3-to-sim3 test")


if __name__ == '__main__':

    print("Testing lietorch forward pass (CPU) ...")
    for Group in [SO3, RxSO3, SE3, Sim3]:
        test_exp_log(Group, device='cpu')
        test_inv(Group, device='cpu')
        test_adj(Group, device='cpu')
        test_act(Group, device='cpu')

    print("Testing lietorch backward pass (CPU) ...")
    for Group in [SO3, RxSO3, SE3, Sim3]:
        # Sim3 gradients are only accurate to ~1e-3
        if Group == Sim3:
            tol = 1e-3
        else:
            tol = 1e-8

        test_exp_log_grad(Group, device='cpu', tol=tol)
        test_inv_log_grad(Group, device='cpu', tol=tol)
        test_adj_grad(Group, device='cpu')
        test_adjT_grad(Group, device='cpu')
        test_act_grad(Group, device='cpu')
        test_matrix_grad(Group, device='cpu')
        extract_translation_grad(Group, device='cpu')
        test_vec_grad(Group, device='cpu')
        test_fromvec_grad(Group, device='cpu')

    print("Testing lietorch forward pass (GPU) ...")
    for Group in [SO3, RxSO3, SE3, Sim3]:
        test_exp_log(Group, device='cuda')
        test_inv(Group, device='cuda')
        test_adj(Group, device='cuda')
        test_act(Group, device='cuda')

    print("Testing lietorch backward pass (GPU) ...")
    for Group in [SO3, RxSO3, SE3, Sim3]:
        if Group == Sim3:
            tol = 1e-3
        else:
            tol = 1e-8

        test_exp_log_grad(Group, device='cuda', tol=tol)
        test_inv_log_grad(Group, device='cuda', tol=tol)
        test_adj_grad(Group, device='cuda')
        test_adjT_grad(Group, device='cuda')
        test_act_grad(Group, device='cuda')
        test_matrix_grad(Group, device='cuda')
        extract_translation_grad(Group, device='cuda')
        test_vec_grad(Group, device='cuda')
        test_fromvec_grad(Group, device='cuda')
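The backward tests above all share one pattern: wrap the operation so its input is a tangent-space perturbation a around a fixed group element, then compare the analytical and numerical Jacobians returned by the repo-local gradcheck. A minimal sketch of applying the same pattern to a new composite operation (the operation and tolerances here are illustrative, not part of this diff):

    # illustrative sketch (not in the repository): gradcheck a composite op
    import torch
    from lietorch import SE3
    from gradcheck import gradcheck

    X = SE3.exp(torch.randn(1, 6).double())   # fixed pose, created outside fn so
    p = torch.randn(1, 3).double()            # the numerical Jacobian is well-defined

    def fn(a):
        # left-perturb the fixed pose, invert, then act on the fixed point
        return (SE3.exp(a) * X).inv().act(p)

    a = torch.zeros(1, 6, requires_grad=True).double()
    analytical, numerical = gradcheck(fn, [a], eps=1e-4)
    assert torch.allclose(analytical[0], numerical[0], atol=1e-8)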
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/logger.py
ADDED
@@ -0,0 +1,58 @@
import torch
from torch.utils.tensorboard import SummaryWriter


SUM_FREQ = 100

class Logger:
    def __init__(self, name, scheduler):
        self.total_steps = 0
        self.running_loss = {}
        self.writer = None
        self.name = name
        self.scheduler = scheduler

    def _print_training_status(self):
        if self.writer is None:
            self.writer = SummaryWriter("runs/{}".format(self.name))
            print([k for k in self.running_loss])

        lr = self.scheduler.get_lr().pop()
        metrics_data = [self.running_loss[k]/SUM_FREQ for k in self.running_loss.keys()]
        training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, lr)
        metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)

        # print the training status
        print(training_str + metrics_str)

        for key in self.running_loss:
            val = self.running_loss[key] / SUM_FREQ
            self.writer.add_scalar(key, val, self.total_steps)
            self.running_loss[key] = 0.0

    def push(self, metrics):

        for key in metrics:
            if key not in self.running_loss:
                self.running_loss[key] = 0.0

            self.running_loss[key] += metrics[key]

        if self.total_steps % SUM_FREQ == SUM_FREQ-1:
            self._print_training_status()
            self.running_loss = {}

        self.total_steps += 1

    def write_dict(self, results):
        if self.writer is None:
            self.writer = SummaryWriter("runs/{}".format(self.name))
            print([k for k in self.running_loss])

        for key in results:
            self.writer.add_scalar(key, results[key], self.total_steps)

    def close(self):
        self.writer.close()
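A hedged usage sketch for the Logger above (the model, optimizer, and metric names are placeholders; only Logger, push, write_dict, and close come from this file):

    # illustrative sketch (not in the repository)
    import torch
    from dpvo.logger import Logger

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000)

    logger = Logger("demo-run", scheduler)
    for step in range(10000):
        loss = model(torch.randn(8, 10)).pow(2).mean()
        loss.backward()
        optimizer.step(); optimizer.zero_grad(); scheduler.step()
        logger.push({"loss": loss.item()})   # averaged and printed every SUM_FREQ steps
    logger.write_dict({"val/ate": 0.0})      # one-off scalars go straight to tensorboard
    logger.close()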
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/net.py
ADDED
@@ -0,0 +1,270 @@
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict

import torch_scatter
from torch_scatter import scatter_sum

from . import fastba
from . import altcorr
from . import lietorch
from .lietorch import SE3

from .extractor import BasicEncoder, BasicEncoder4
from .blocks import GradientClip, GatedResidual, SoftAgg

from .utils import *
from .ba import BA
from . import projective_ops as pops

autocast = torch.cuda.amp.autocast
import matplotlib.pyplot as plt

DIM = 384

class Update(nn.Module):
    def __init__(self, p):
        super(Update, self).__init__()

        self.c1 = nn.Sequential(
            nn.Linear(DIM, DIM),
            nn.ReLU(inplace=True),
            nn.Linear(DIM, DIM))

        self.c2 = nn.Sequential(
            nn.Linear(DIM, DIM),
            nn.ReLU(inplace=True),
            nn.Linear(DIM, DIM))

        self.norm = nn.LayerNorm(DIM, eps=1e-3)

        self.agg_kk = SoftAgg(DIM)
        self.agg_ij = SoftAgg(DIM)

        self.gru = nn.Sequential(
            nn.LayerNorm(DIM, eps=1e-3),
            GatedResidual(DIM),
            nn.LayerNorm(DIM, eps=1e-3),
            GatedResidual(DIM),
        )

        self.corr = nn.Sequential(
            nn.Linear(2*49*p*p, DIM),
            nn.ReLU(inplace=True),
            nn.Linear(DIM, DIM),
            nn.LayerNorm(DIM, eps=1e-3),
            nn.ReLU(inplace=True),
            nn.Linear(DIM, DIM),
        )

        self.d = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Linear(DIM, 2),
            GradientClip())

        self.w = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Linear(DIM, 2),
            GradientClip(),
            nn.Sigmoid())


    def forward(self, net, inp, corr, flow, ii, jj, kk):
        """ update operator """

        net = net + inp + self.corr(corr)
        net = self.norm(net)

        ix, jx = fastba.neighbors(kk, jj)
        mask_ix = (ix >= 0).float().reshape(1, -1, 1)
        mask_jx = (jx >= 0).float().reshape(1, -1, 1)

        net = net + self.c1(mask_ix * net[:,ix])
        net = net + self.c2(mask_jx * net[:,jx])

        # aggregate over edges sharing a patch, and over edges sharing
        # the same (i, j) frame pair (hashed into a single index)
        net = net + self.agg_kk(net, kk)
        net = net + self.agg_ij(net, ii*12345 + jj)

        net = self.gru(net)

        return net, (self.d(net), self.w(net), None)


class Patchifier(nn.Module):
    def __init__(self, patch_size=3):
        super(Patchifier, self).__init__()
        self.patch_size = patch_size
        self.fnet = BasicEncoder4(output_dim=128, norm_fn='instance')
        self.inet = BasicEncoder4(output_dim=DIM, norm_fn='none')

    def __image_gradient(self, images):
        gray = ((images + 0.5) * (255.0 / 2)).sum(dim=2)
        dx = gray[...,:-1,1:] - gray[...,:-1,:-1]
        dy = gray[...,1:,:-1] - gray[...,:-1,:-1]
        g = torch.sqrt(dx**2 + dy**2)
        g = F.avg_pool2d(g, 4, 4)
        return g

    def forward(self, images, patches_per_image=80, disps=None, gradient_bias=False, return_color=False):
        """ extract patches from input images """
        fmap = self.fnet(images) / 4.0
        imap = self.inet(images) / 4.0

        b, n, c, h, w = fmap.shape
        P = self.patch_size

        # bias patch selection towards regions with high gradient
        if gradient_bias:
            g = self.__image_gradient(images)
            x = torch.randint(1, w-1, size=[n, 3*patches_per_image], device="cuda")
            y = torch.randint(1, h-1, size=[n, 3*patches_per_image], device="cuda")

            coords = torch.stack([x, y], dim=-1).float()
            g = altcorr.patchify(g[0,:,None], coords, 0).view(n, 3 * patches_per_image)

            # keep the patches_per_image candidates with the largest gradient
            ix = torch.argsort(g, dim=1)
            x = torch.gather(x, 1, ix[:, -patches_per_image:])
            y = torch.gather(y, 1, ix[:, -patches_per_image:])

        else:
            x = torch.randint(1, w-1, size=[n, patches_per_image], device="cuda")
            y = torch.randint(1, h-1, size=[n, patches_per_image], device="cuda")

        coords = torch.stack([x, y], dim=-1).float()
        imap = altcorr.patchify(imap[0], coords, 0).view(b, -1, DIM, 1, 1)
        gmap = altcorr.patchify(fmap[0], coords, P//2).view(b, -1, 128, P, P)

        if return_color:
            clr = altcorr.patchify(images[0], 4*(coords + 0.5), 0).view(b, -1, 3)

        if disps is None:
            disps = torch.ones(b, n, h, w, device="cuda")

        grid, _ = coords_grid_with_index(disps, device=fmap.device)
        patches = altcorr.patchify(grid[0], coords, P//2).view(b, -1, 3, P, P)

        index = torch.arange(n, device="cuda").view(n, 1)
        index = index.repeat(1, patches_per_image).reshape(-1)

        if return_color:
            return fmap, gmap, imap, patches, index, clr

        return fmap, gmap, imap, patches, index


class CorrBlock:
    def __init__(self, fmap, gmap, radius=3, dropout=0.2, levels=[1,4]):
        self.dropout = dropout
        self.radius = radius
        self.levels = levels

        self.gmap = gmap
        self.pyramid = pyramidify(fmap, lvls=levels)

    def __call__(self, ii, jj, coords):
        corrs = []
        for i in range(len(self.levels)):
            corrs += [ altcorr.corr(self.gmap, self.pyramid[i], coords / self.levels[i], ii, jj, self.radius, self.dropout) ]
        return torch.stack(corrs, -1).view(1, len(ii), -1)


class VONet(nn.Module):
    def __init__(self, use_viewer=False):
        super(VONet, self).__init__()
        self.P = 3
        self.patchify = Patchifier(self.P)
        self.update = Update(self.P)

        self.DIM = DIM
        self.RES = 4


    @autocast(enabled=False)
    def forward(self, images, poses, disps, intrinsics, M=1024, STEPS=12, P=1, structure_only=False, rescale=False):
        """ jointly estimate poses and patch depths over a video clip """

        images = 2 * (images / 255.0) - 0.5
        intrinsics = intrinsics / 4.0
        disps = disps[:, :, 1::4, 1::4].float()

        fmap, gmap, imap, patches, ix = self.patchify(images, disps=disps)

        corr_fn = CorrBlock(fmap, gmap)

        b, N, c, h, w = fmap.shape
        p = self.P

        patches_gt = patches.clone()
        Ps = poses

        d = patches[..., 2, p//2, p//2]
        patches = set_depth(patches, torch.rand_like(d))

        # initial edges: patches from the first 8 frames connect to all 8 frames
        kk, jj = flatmeshgrid(torch.where(ix < 8)[0], torch.arange(0, 8, device="cuda"))
        ii = ix[kk]

        imap = imap.view(b, -1, DIM)
        net = torch.zeros(b, len(kk), DIM, device="cuda", dtype=torch.float)

        Gs = SE3.IdentityLike(poses)

        if structure_only:
            Gs.data[:] = poses.data[:]

        traj = []
        bounds = [-64, -64, w + 64, h + 64]

        while len(traj) < STEPS:
            Gs = Gs.detach()
            patches = patches.detach()

            n = ii.max() + 1
            if len(traj) >= 8 and n < images.shape[1]:
                if not structure_only: Gs.data[:,n] = Gs.data[:,n-1]
                kk1, jj1 = flatmeshgrid(torch.where(ix < n)[0], torch.arange(n, n+1, device="cuda"))
                kk2, jj2 = flatmeshgrid(torch.where(ix == n)[0], torch.arange(0, n+1, device="cuda"))

                ii = torch.cat([ix[kk1], ix[kk2], ii])
                jj = torch.cat([jj1, jj2, jj])
                kk = torch.cat([kk1, kk2, kk])

                net1 = torch.zeros(b, len(kk1) + len(kk2), DIM, device="cuda")
                net = torch.cat([net1, net], dim=1)

                # occasionally drop all edges incident to frame n-4 (augmentation)
                if np.random.rand() < 0.1:
                    k = (ii != (n - 4)) & (jj != (n - 4))
                    ii = ii[k]
                    jj = jj[k]
                    kk = kk[k]
                    net = net[:,k]

                # initialize new patch depths with the median of the two previous frames
                patches[:,ix==n,2] = torch.median(patches[:,(ix == n-1) | (ix == n-2),2])
                n = ii.max() + 1

            coords = pops.transform(Gs, patches, intrinsics, ii, jj, kk)
            coords1 = coords.permute(0, 1, 4, 2, 3).contiguous()

            corr = corr_fn(kk, jj, coords1)
            net, (delta, weight, _) = self.update(net, imap[:,kk], corr, None, ii, jj, kk)

            lmbda = 1e-4
            target = coords[...,p//2,p//2,:] + delta

            ep = 10
            for itr in range(2):
                Gs, patches = BA(Gs, patches, intrinsics, target, weight, lmbda, ii, jj, kk,
                    bounds, ep=ep, fixedp=1, structure_only=structure_only)

            kl = torch.as_tensor(0)
            dij = (ii - jj).abs()
            k = (dij > 0) & (dij <= 2)

            coords = pops.transform(Gs, patches, intrinsics, ii[k], jj[k], kk[k])
            coords_gt, valid, _ = pops.transform(Ps, patches_gt, intrinsics, ii[k], jj[k], kk[k], jacobian=True)

            traj.append((valid, coords, coords_gt, Gs[:,:n], Ps[:,:n], kl))

        return traj
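For orientation, a rough sketch of the training-time entry point. Tensor shapes and the pose layout are assumptions inferred from the indexing in forward (images in [0, 255], disps and intrinsics at full resolution), and the compiled altcorr/fastba CUDA extensions plus a CUDA device are required:

    # illustrative sketch (not in the repository); shapes are assumptions
    import torch
    from dpvo.net import VONet
    from dpvo.lietorch import SE3

    B, N, H, W = 1, 15, 480, 640
    images = torch.rand(B, N, 3, H, W, device="cuda") * 255
    disps = torch.ones(B, N, H, W, device="cuda")          # inverse depth
    intrinsics = torch.tensor([400., 400., W/2, H/2], device="cuda").repeat(B, N, 1)

    vec = torch.zeros(B, N, 7, device="cuda")
    vec[..., 6] = 1.0                                      # identity poses, assuming
    poses = SE3.InitFromVec(vec)                           # (tx ty tz qx qy qz qw) layout

    net = VONet().cuda()
    traj = net(images, poses, disps, intrinsics, STEPS=12)
    valid, coords, coords_gt, Gs, Ps, kl = traj[-1]        # per-step supervision terms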
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/plot_utils.py
ADDED
@@ -0,0 +1,52 @@
from copy import deepcopy

import matplotlib.pyplot as plt
import numpy as np
from evo.core import sync
from evo.core.trajectory import PoseTrajectory3D
from evo.tools import plot
from pathlib import Path


def make_traj(args) -> PoseTrajectory3D:
    if isinstance(args, tuple):
        traj, tstamps = args
        return PoseTrajectory3D(positions_xyz=traj[:,:3], orientations_quat_wxyz=traj[:,3:], timestamps=tstamps)
    assert isinstance(args, PoseTrajectory3D), type(args)
    return deepcopy(args)

def best_plotmode(traj):
    _, i1, i2 = np.argsort(np.var(traj.positions_xyz, axis=0))
    plot_axes = "xyz"[i2] + "xyz"[i1]
    return getattr(plot.PlotMode, plot_axes)

def plot_trajectory(pred_traj, gt_traj=None, title="", filename="", align=True, correct_scale=True):
    pred_traj = make_traj(pred_traj)

    if gt_traj is not None:
        gt_traj = make_traj(gt_traj)
        gt_traj, pred_traj = sync.associate_trajectories(gt_traj, pred_traj)

        if align:
            pred_traj.align(gt_traj, correct_scale=correct_scale)

    plot_collection = plot.PlotCollection("PlotCol")
    fig = plt.figure(figsize=(8, 8))
    plot_mode = best_plotmode(gt_traj if (gt_traj is not None) else pred_traj)
    ax = plot.prepare_axis(fig, plot_mode)
    ax.set_title(title)
    if gt_traj is not None:
        plot.traj(ax, plot_mode, gt_traj, '--', 'gray', "Ground Truth")
    plot.traj(ax, plot_mode, pred_traj, '-', 'blue', "Predicted")
    plot_collection.add_figure("traj (error)", fig)
    plot_collection.export(filename, confirm_overwrite=False)
    plt.close(fig=fig)
    print(f"Saved {filename}")

def save_trajectory_tum_format(traj, filename):
    traj = make_traj(traj)
    tostr = lambda a: ' '.join(map(str, a))
    with Path(filename).open('w') as f:
        for i in range(traj.num_poses):
            # TUM format is: timestamp tx ty tz qx qy qz qw (quaternion reordered from wxyz)
            f.write(f"{traj.timestamps[i]} {tostr(traj.positions_xyz[i])} {tostr(traj.orientations_quat_wxyz[i][[1,2,3,0]])}\n")
    print(f"Saved {filename}")
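A hedged usage sketch for the two helpers above. The tuple form accepted by make_traj is (poses, tstamps) with pose rows laid out as (x, y, z, qw, qx, qy, qz); the values and file names here are dummies:

    # illustrative sketch (not in the repository)
    import numpy as np
    from dpvo.plot_utils import plot_trajectory, save_trajectory_tum_format

    N = 100
    poses = np.zeros((N, 7))
    poses[:, 0] = np.linspace(0, 10, N)    # translate along x
    poses[:, 3] = 1.0                      # identity rotation (qw = 1)
    tstamps = np.arange(N, dtype=np.float64)

    plot_trajectory((poses, tstamps), title="demo", filename="traj.pdf")
    save_trajectory_tum_format((poses, tstamps), "traj.txt")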
    	
third-party/DPVO/build/lib.win-amd64-3.9/dpvo/projective_ops.py
ADDED
@@ -0,0 +1,121 @@
import torch
import torch.nn.functional as F

from .lietorch import SE3, Sim3

MIN_DEPTH = 0.2

def extract_intrinsics(intrinsics):
    return intrinsics[...,None,None,:].unbind(dim=-1)

def coords_grid(ht, wd, **kwargs):
    y, x = torch.meshgrid(
        torch.arange(ht).to(**kwargs).float(),
        torch.arange(wd).to(**kwargs).float())

    return torch.stack([x, y], dim=-1)


def iproj(patches, intrinsics):
    """ inverse projection """
    x, y, d = patches.unbind(dim=2)
    fx, fy, cx, cy = intrinsics[...,None,None].unbind(dim=2)

    i = torch.ones_like(d)
    xn = (x - cx) / fx
    yn = (y - cy) / fy

    X = torch.stack([xn, yn, i, d], dim=-1)
    return X


def proj(X, intrinsics, depth=False):
    """ projection """

    X, Y, Z, W = X.unbind(dim=-1)
    fx, fy, cx, cy = intrinsics[...,None,None].unbind(dim=2)

    # d = 0.01 * torch.ones_like(Z)
    # d[Z > 0.01] = 1.0 / Z[Z > 0.01]
    # d = torch.ones_like(Z)
    # d[Z.abs() > 0.1] = 1.0 / Z[Z.abs() > 0.1]

    d = 1.0 / Z.clamp(min=0.1)
    x = fx * (d * X) + cx
    y = fy * (d * Y) + cy

    if depth:
        return torch.stack([x, y, d], dim=-1)

    return torch.stack([x, y], dim=-1)


def transform(poses, patches, intrinsics, ii, jj, kk, depth=False, valid=False, jacobian=False, tonly=False):
    """ projective transform """

    # backproject
    X0 = iproj(patches[:,kk], intrinsics[:,ii])

    # transform
    Gij = poses[:, jj] * poses[:, ii].inv()

    if tonly:
        # reset rotation to identity (translation-only transform)
        Gij[...,3:] = torch.as_tensor([0,0,0,1], device=Gij.device)

    X1 = Gij[:,:,None,None] * X0

    # project
    x1 = proj(X1, intrinsics[:,jj], depth)

    if jacobian:
        p = X1.shape[2]
        X, Y, Z, H = X1[...,p//2,p//2,:].unbind(dim=-1)
        o = torch.zeros_like(H)

        fx, fy, cx, cy = intrinsics[:,jj].unbind(dim=-1)

        d = torch.zeros_like(Z)
        d[Z.abs() > 0.2] = 1.0 / Z[Z.abs() > 0.2]

        Ja = torch.stack([
            H,  o,  o,  o,  Z, -Y,
            o,  H,  o, -Z,  o,  X,
            o,  o,  H,  Y, -X,  o,
            o,  o,  o,  o,  o,  o,
        ], dim=-1).view(1, len(ii), 4, 6)

        Jp = torch.stack([
             fx*d,     o, -fx*X*d*d,  o,
                o,  fy*d, -fy*Y*d*d,  o,
        ], dim=-1).view(1, len(ii), 2, 4)

        Jj = torch.matmul(Jp, Ja)
        Ji = -Gij[:,:,None].adjT(Jj)

        Jz = torch.matmul(Jp, Gij.matrix()[...,:,3:])

        return x1, (Z > 0.2).float(), (Ji, Jj, Jz)

    if valid:
        return x1, (X1[...,2] > 0.2).float()

    return x1

def point_cloud(poses, patches, intrinsics, ix):
    """ generate point cloud from patches """
    return poses[:,ix,None,None].inv() * iproj(patches, intrinsics[:,ix])
| 110 | 
            +
             | 
| 111 | 
            +
            def flow_mag(poses, patches, intrinsics, ii, jj, kk, beta=0.3):
         | 
| 112 | 
            +
                """ projective transform """
         | 
| 113 | 
            +
             | 
| 114 | 
            +
                coords0 = transform(poses, patches, intrinsics, ii, ii, kk)
         | 
| 115 | 
            +
                coords1 = transform(poses, patches, intrinsics, ii, jj, kk, tonly=False)
         | 
| 116 | 
            +
                coords2 = transform(poses, patches, intrinsics, ii, jj, kk, tonly=True)
         | 
| 117 | 
            +
             | 
| 118 | 
            +
                flow1 = (coords1 - coords0).norm(dim=-1)
         | 
| 119 | 
            +
                flow2 = (coords2 - coords0).norm(dim=-1)
         | 
| 120 | 
            +
             | 
| 121 | 
            +
                return beta * flow1 + (1-beta) * flow2
         | 
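A quick sanity check on the pinhole model above: `iproj` stores normalized image coordinates together with a homogeneous 1 and the inverse depth, so under the identity transform `proj(iproj(p))` must reproduce the input pixel coordinates. A minimal sketch (assuming the file is importable as `dpvo.projective_ops`; all values below are illustrative):

    import torch
    from dpvo.projective_ops import iproj, proj

    B, N, p = 1, 2, 3                               # batch, patch count, patch size
    patches = torch.zeros(B, N, 3, p, p)            # channels: pixel x, pixel y, inverse depth
    patches[:, :, 0] = 200.0
    patches[:, :, 1] = 100.0
    patches[:, :, 2] = 0.5
    intrinsics = torch.tensor([400.0, 400.0, 160.0, 120.0]).expand(B, N, 4)

    X = iproj(patches, intrinsics)                  # (B, N, p, p, 4) homogeneous points
    x = proj(X, intrinsics)                         # (B, N, p, p, 2) reprojected pixels
    assert torch.allclose(x[..., 0], patches[:, :, 0])
    assert torch.allclose(x[..., 1], patches[:, :, 1])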
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/stream.py
    ADDED
    
@@ -0,0 +1,87 @@
import os
import cv2
import numpy as np
from multiprocessing import Process, Queue
from pathlib import Path
from itertools import chain

def image_stream(queue, imagedir, calib, stride, skip=0):
    """ image generator """

    calib = np.loadtxt(calib, delimiter=" ")
    fx, fy, cx, cy = calib[:4]

    K = np.eye(3)
    K[0,0] = fx
    K[0,2] = cx
    K[1,1] = fy
    K[1,2] = cy

    img_exts = ["*.png", "*.jpeg", "*.jpg"]
    image_list = sorted(chain.from_iterable(Path(imagedir).glob(e) for e in img_exts))[skip::stride]

    for t, imfile in enumerate(image_list):
        image = cv2.imread(str(imfile))
        if len(calib) > 4:
            image = cv2.undistort(image, K, calib[4:])

        if 0:
            image = cv2.resize(image, None, fx=0.5, fy=0.5)
            intrinsics = np.array([fx / 2, fy / 2, cx / 2, cy / 2])

        else:
            intrinsics = np.array([fx, fy, cx, cy])

        h, w, _ = image.shape
        image = image[:h-h%16, :w-w%16]

        queue.put((t, image, intrinsics))

    queue.put((-1, image, intrinsics))


def video_stream(queue, imagedir, calib, stride, skip=0):
    """ video generator """

    calib = np.loadtxt(calib, delimiter=" ")
    fx, fy, cx, cy = calib[:4]

    K = np.eye(3)
    K[0,0] = fx
    K[0,2] = cx
    K[1,1] = fy
    K[1,2] = cy

    cap = cv2.VideoCapture(imagedir)

    t = 0

    for _ in range(skip):
        ret, image = cap.read()

    while True:
        # Capture frame-by-frame
        for _ in range(stride):
            ret, image = cap.read()
            # if frame is read correctly ret is True
            if not ret:
                break

        if not ret:
            break

        if len(calib) > 4:
            image = cv2.undistort(image, K, calib[4:])

        image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
        h, w, _ = image.shape
        image = image[:h-h%16, :w-w%16]

        intrinsics = np.array([fx*.5, fy*.5, cx*.5, cy*.5])
        queue.put((t, image, intrinsics))

        t += 1

    queue.put((-1, image, intrinsics))
    cap.release()
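Both generators are meant to run in a worker process and hand frames over a queue; a timestamp of -1 acts as the end-of-stream sentinel. A hedged usage sketch (paths are placeholders, and the module path `dpvo.stream` assumes the layout above):

    from multiprocessing import Process, Queue
    from dpvo.stream import image_stream

    if __name__ == "__main__":
        queue = Queue(maxsize=8)
        reader = Process(target=image_stream,
                         args=(queue, "path/to/images", "path/to/calib.txt", 1))
        reader.start()

        while True:
            t, image, intrinsics = queue.get()
            if t < 0:  # sentinel: no more frames
                break
            print(t, image.shape, intrinsics)

        reader.join()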
    	
        third-party/DPVO/build/lib.win-amd64-3.9/dpvo/utils.py
    ADDED
    
@@ -0,0 +1,87 @@
import torch
import torch.nn.functional as F


all_times = []

class Timer:
    def __init__(self, name, enabled=True):
        self.name = name
        self.enabled = enabled

        if self.enabled:
            self.start = torch.cuda.Event(enable_timing=True)
            self.end = torch.cuda.Event(enable_timing=True)

    def __enter__(self):
        if self.enabled:
            self.start.record()

    def __exit__(self, type, value, traceback):
        global all_times
        if self.enabled:
            self.end.record()
            torch.cuda.synchronize()

            elapsed = self.start.elapsed_time(self.end)
            all_times.append(elapsed)
            print(self.name, elapsed)


def coords_grid(b, n, h, w, **kwargs):
    """ coordinate grid """
    x = torch.arange(0, w, dtype=torch.float, **kwargs)
    y = torch.arange(0, h, dtype=torch.float, **kwargs)
    coords = torch.stack(torch.meshgrid(y, x, indexing="ij"))
    return coords[[1,0]].view(1, 1, 2, h, w).repeat(b, n, 1, 1, 1)

def coords_grid_with_index(d, **kwargs):
    """ coordinate grid with frame index"""
    b, n, h, w = d.shape
    i = torch.ones_like(d)
    x = torch.arange(0, w, dtype=torch.float, **kwargs)
    y = torch.arange(0, h, dtype=torch.float, **kwargs)

    y, x = torch.stack(torch.meshgrid(y, x, indexing="ij"))
    y = y.view(1, 1, h, w).repeat(b, n, 1, 1)
    x = x.view(1, 1, h, w).repeat(b, n, 1, 1)

    coords = torch.stack([x, y, d], dim=2)
    index = torch.arange(0, n, dtype=torch.float, **kwargs)
    index = index.view(1, n, 1, 1, 1).repeat(b, 1, 1, h, w)

    return coords, index

def patchify(x, patch_size=3):
    """ extract patches from video """
    b, n, c, h, w = x.shape
    x = x.view(b*n, c, h, w)
    y = F.unfold(x, patch_size)
    y = y.transpose(1,2)
    return y.reshape(b, -1, c, patch_size, patch_size)


def pyramidify(fmap, lvls=[1]):
    """ turn fmap into a pyramid """
    b, n, c, h, w = fmap.shape

    pyramid = []
    for lvl in lvls:
        gmap = F.avg_pool2d(fmap.view(b*n, c, h, w), lvl, stride=lvl)
        pyramid += [ gmap.view(b, n, c, h//lvl, w//lvl) ]

    return pyramid

def all_pairs_exclusive(n, **kwargs):
    ii, jj = torch.meshgrid(torch.arange(n, **kwargs), torch.arange(n, **kwargs))
    k = ii != jj
    return ii[k].reshape(-1), jj[k].reshape(-1)

def set_depth(patches, depth):
    patches[...,2,:,:] = depth[...,None,None]
    return patches

def flatmeshgrid(*args, **kwargs):
    grid = torch.meshgrid(*args, **kwargs)
    return (x.reshape(-1) for x in grid)
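Shape conventions for the helpers above, as a small sketch (assuming the module is importable as `dpvo.utils`; the Timer is a no-op without CUDA):

    import torch
    from dpvo.utils import Timer, coords_grid, patchify

    video = torch.randn(1, 4, 3, 48, 64)         # (batch, frames, channels, H, W)
    grid = coords_grid(1, 4, 48, 64)             # (1, 4, 2, 48, 64), channels are (x, y)

    with Timer("patchify", enabled=torch.cuda.is_available()):
        patches = patchify(video, patch_size=3)  # (1, 4*46*62, 3, 3, 3)

    print(grid.shape, patches.shape)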
    	
        third-party/DPVO/build/lib.win-amd64-3.9/lietorch_backends.cp39-win_amd64.pyd
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c2c6a2dd2d29f5ee56eeb0912681982366f36a0bc7420a1ea05b05d64d761a2f
size 2983936
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_deps
    ADDED
    
Binary file (987 kB)
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_log
    ADDED
    
@@ -0,0 +1,6 @@
# ninja log v5
0	10746	7510839870546033	C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj	8a6ea87c38b5029e
5	34954	7510840112325833	C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj	f7d1343ce924f3b5
7	10971	7510840239521303	C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj	1448633d654dbba0
1	33893	7510840468461448	C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj	d1400b5b4a6daa70
0	10676	7510840592828284	C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj	ba69a789264375ff
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/build.ninja
    ADDED
    
@@ -0,0 +1,29 @@
ninja_required_version = 1.3
cxx = cl
nvcc = C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3\bin\nvcc

cflags = /nologo /Ox /W3 /GL /DNDEBUG /MD /MD /wd4819 /wd4251 /wd4244 /wd4267 /wd4275 /wd4018 /wd4190 /EHsc -IC:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo/lietorch/include -IC:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\thirdparty/eigen-3.4.0 -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\torch\csrc\api\include -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\TH -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\THC "-IC:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3\include" -IC:\Users\thpap\miniconda3\envs\wham\include -IC:\Users\thpap\miniconda3\envs\wham\include "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Program Files (x86)\Windows Kits\NETFXSDK\4.8\include\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\cppwinrt" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Program Files (x86)\Windows Kits\NETFXSDK\4.8\include\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\cppwinrt"
post_cflags = -O3 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=lietorch_backends -D_GLIBCXX_USE_CXX11_ABI=0 /std:c++14
cuda_cflags = --use-local-env -Xcompiler /MD -Xcompiler /wd4819 -Xcompiler /wd4251 -Xcompiler /wd4244 -Xcompiler /wd4267 -Xcompiler /wd4275 -Xcompiler /wd4018 -Xcompiler /wd4190 -Xcompiler /EHsc -Xcudafe --diag_suppress=base_class_has_different_dll_interface -Xcudafe --diag_suppress=field_without_dll_interface -Xcudafe --diag_suppress=dll_interface_conflict_none_assumed -Xcudafe --diag_suppress=dll_interface_conflict_dllexport_assumed -IC:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo/lietorch/include -IC:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\thirdparty/eigen-3.4.0 -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\torch\csrc\api\include -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\TH -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\THC "-IC:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3\include" -IC:\Users\thpap\miniconda3\envs\wham\include -IC:\Users\thpap\miniconda3\envs\wham\include "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Program Files (x86)\Windows Kits\NETFXSDK\4.8\include\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\cppwinrt" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Program Files (x86)\Windows Kits\NETFXSDK\4.8\include\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\cppwinrt"
cuda_post_cflags = -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -O3 --allow-unsupported-compiler -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=lietorch_backends -D_GLIBCXX_USE_CXX11_ABI=0 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_86,code=sm_86
ldflags = 

rule compile
  command = cl /showIncludes $cflags -c $in /Fo$out $post_cflags
  deps = msvc

rule cuda_compile
  depfile = $out.d
  deps = gcc
  command = $nvcc --generate-dependencies-with-compile --dependency-output $out.d $cuda_cflags -c $in -o $out $cuda_post_cflags


build C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\build\temp.win-amd64-3.9\Release\dpvo/lietorch/src/lietorch.obj: compile C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo\lietorch\src\lietorch.cpp
build C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\build\temp.win-amd64-3.9\Release\dpvo/lietorch/src/lietorch_cpu.obj: compile C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo\lietorch\src\lietorch_cpu.cpp
build C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\build\temp.win-amd64-3.9\Release\dpvo/lietorch/src/lietorch_gpu.obj: cuda_compile C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo\lietorch\src\lietorch_gpu.cu
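This build.ninja is what PyTorch's C++ extension builder emits on Windows for the lietorch_backends module; the build edges and -I flags above pin down the sources and include directories. A hedged sketch of a setup.py that would produce an equivalent build (not necessarily DPVO's exact script; the compile flags are condensed):

    from setuptools import setup
    from torch.utils.cpp_extension import BuildExtension, CUDAExtension

    setup(
        name="lietorch_backends",
        ext_modules=[
            CUDAExtension(
                name="lietorch_backends",
                sources=[
                    "dpvo/lietorch/src/lietorch.cpp",
                    "dpvo/lietorch/src/lietorch_cpu.cpp",
                    "dpvo/lietorch/src/lietorch_gpu.cu",
                ],
                include_dirs=["dpvo/lietorch/include", "thirdparty/eigen-3.4.0"],
                extra_compile_args={
                    "cxx": ["-O3"],
                    "nvcc": ["-O3", "--allow-unsupported-compiler"],
                },
            )
        ],
        cmdclass={"build_ext": BuildExtension},
    )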
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f1ef51859e20505bb755ea5e5ce48fba33b480d99b464405edb944ee5c40191
size 37419806
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c41ced021c0c59fdfba4692ce3769cbc85e0a7b72ad5a1c332c86ff5ca5015b3
size 1461202
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.exp
    ADDED
    
Binary file (17.5 kB)
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.lib
    ADDED
    
Binary file (30.3 kB)
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0c345d67f220f9216fd2ce229b4d4a25c91ba82568459eb137ed18ae96d77aad
size 37657608
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:766b7b9f519617f73e61fefca21afde2a56852bfc92bec64cb88fcf60307657e
size 1195236
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.exp
    ADDED
    
Binary file (17.5 kB)
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.lib
    ADDED
    
Binary file (30.1 kB)
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fc39b10aa93073560e20f0b855a64775989f4368fc8dc067d086a10546a18c63
size 44606503
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.exp
    ADDED
    
Binary file (25.2 kB)
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.lib
    ADDED
    
Binary file (44.2 kB)
    	
        third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_cpu.obj
    ADDED
    
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1957cc4bf3d2edd2496ff09abebd3da682200fc79241e85be4ba902323d90d47
size 124596555
