# File size: 1,459 Bytes
# Source revision: 3ef1661
import os
import json
import torch
import torchvision.transforms as transforms
import os.path
import numpy as np
import cv2
from torch.utils.data import Dataset
import random
from .__base_dataset__ import BaseDataset
class UASOLDataset(BaseDataset):
    """Dataset loader for the UASOL dataset.

    Raw depth maps use values above 65500 as an invalid-pixel sentinel;
    those are zeroed, and remaining values are divided by
    ``cfg.metric_scale`` to obtain metric depth.
    """

    def __init__(self, cfg, phase, **kwargs):
        super(UASOLDataset, self).__init__(
            cfg=cfg,
            phase=phase,
            **kwargs)
        # Divisor converting raw stored depth values into metric units.
        self.metric_scale = cfg.metric_scale

    def process_depth(self, depth: np.ndarray, rgb: np.ndarray) -> np.ndarray:
        """Zero out invalid depth readings and rescale to metric units.

        Args:
            depth: raw depth map; values > 65500 are treated as invalid.
            rgb: matching RGB image. Unused here; presumably kept so all
                datasets share the same ``process_depth`` signature.

        Returns:
            The depth map, modified in place, rescaled by ``metric_scale``.
        """
        depth[depth > 65500] = 0  # sentinel values -> invalid (0)
        depth /= self.metric_scale
        return depth

    def load_rgb_depth(self, rgb_path: str, depth_path: str) -> 'tuple[np.ndarray, np.ndarray]':
        """Load an RGB image and its depth map from the given paths.

        Args:
            rgb_path: path to the RGB image file.
            depth_path: path to the raw depth map file.

        Returns:
            ``(rgb, depth)`` with the depth map cleaned, rescaled, and its
            first and last rows cropped.
        """
        rgb = self.load_data(rgb_path, is_rgb_img=True)
        if rgb is None:
            self.logger.info(f'>>>>{rgb_path} has errors.')
        depth = self.load_data(depth_path)
        if depth is None:
            self.logger.info(f'{depth_path} has errors.')
        # np.float was removed in NumPy 1.24; np.float64 is what the old
        # alias resolved to, so behavior is unchanged on older NumPy.
        depth = depth.astype(np.float64)
        depth = self.process_depth(depth, rgb)
        # Drop the first and last rows of the depth map — presumably to
        # trim border artifacts; TODO confirm against the dataset spec.
        depth = depth[1:-1, ...]
        return rgb, depth
if __name__ == '__main__':
    # Smoke test: build a training split from the config and print its summary.
    from mmcv.utils import Config

    config = Config.fromfile('mono/configs/Apolloscape_DDAD/convnext_base.cascade.1m.sgd.mae.py')
    train_set = UASOLDataset(config['Apolloscape'], 'train', **config.data_basic)
    print(train_set)