LiheYoung committed on
Commit b401682 · verified · 1 Parent(s): ac64c09

Delete metric_depth

Files changed (31)
  1. metric_depth/README.md +0 -55
  2. metric_depth/assets/compare_zoedepth.png +0 -3
  3. metric_depth/dataset/hypersim.py +0 -74
  4. metric_depth/dataset/kitti.py +0 -57
  5. metric_depth/dataset/splits/hypersim/train.txt +0 -3
  6. metric_depth/dataset/splits/hypersim/val.txt +0 -0
  7. metric_depth/dataset/splits/kitti/val.txt +0 -0
  8. metric_depth/dataset/splits/vkitti2/train.txt +0 -0
  9. metric_depth/dataset/transform.py +0 -277
  10. metric_depth/dataset/vkitti2.py +0 -54
  11. metric_depth/depth_anything_v2/dinov2.py +0 -415
  12. metric_depth/depth_anything_v2/dinov2_layers/__init__.py +0 -11
  13. metric_depth/depth_anything_v2/dinov2_layers/attention.py +0 -83
  14. metric_depth/depth_anything_v2/dinov2_layers/block.py +0 -252
  15. metric_depth/depth_anything_v2/dinov2_layers/drop_path.py +0 -35
  16. metric_depth/depth_anything_v2/dinov2_layers/layer_scale.py +0 -28
  17. metric_depth/depth_anything_v2/dinov2_layers/mlp.py +0 -41
  18. metric_depth/depth_anything_v2/dinov2_layers/patch_embed.py +0 -89
  19. metric_depth/depth_anything_v2/dinov2_layers/swiglu_ffn.py +0 -63
  20. metric_depth/depth_anything_v2/dpt.py +0 -222
  21. metric_depth/depth_anything_v2/util/blocks.py +0 -148
  22. metric_depth/depth_anything_v2/util/transform.py +0 -158
  23. metric_depth/depth_to_pointcloud.py +0 -83
  24. metric_depth/dist_train.sh +0 -26
  25. metric_depth/requirements.txt +0 -5
  26. metric_depth/run.py +0 -81
  27. metric_depth/train.py +0 -212
  28. metric_depth/util/dist_helper.py +0 -41
  29. metric_depth/util/loss.py +0 -16
  30. metric_depth/util/metric.py +0 -26
  31. metric_depth/util/utils.py +0 -26
metric_depth/README.md DELETED
@@ -1,55 +0,0 @@
- # Depth Anything V2 for Metric Depth Estimation
-
- ![teaser](./assets/compare_zoedepth.png)
-
- Here we provide a simple codebase to fine-tune our Depth Anything V2 pre-trained encoder for metric depth estimation. Built on our powerful encoder, we use a simple DPT head to regress the depth. We fine-tune our pre-trained encoder on the synthetic Hypersim / Virtual KITTI datasets for indoor / outdoor metric depth estimation, respectively.
-
-
- ## Usage
-
- ### Inference
-
- Please first download our pre-trained metric depth models and put them under the `checkpoints` directory:
- - [Indoor model from Hypersim](https://huggingface.co/depth-anything/Depth-Anything-V2-Metric-Hypersim-Large/resolve/main/depth_anything_v2_metric_hypersim_vitl.pth?download=true)
- - [Outdoor model from Virtual KITTI 2](https://huggingface.co/depth-anything/Depth-Anything-V2-Metric-VKITTI-Large/resolve/main/depth_anything_v2_metric_vkitti_vitl.pth?download=true)
-
- ```bash
- # indoor scenes
- python run.py \
-     --encoder vitl --load-from checkpoints/depth_anything_v2_metric_hypersim_vitl.pth \
-     --max-depth 20 --img-path <path> --outdir <outdir> [--input-size <size>] [--save-numpy]
-
- # outdoor scenes
- python run.py \
-     --encoder vitl --load-from checkpoints/depth_anything_v2_metric_vkitti_vitl.pth \
-     --max-depth 80 --img-path <path> --outdir <outdir> [--input-size <size>] [--save-numpy]
- ```
-
- You can also project 2D images to point clouds:
- ```bash
- python depth_to_pointcloud.py \
-     --encoder vitl --load-from checkpoints/depth_anything_v2_metric_hypersim_vitl.pth \
-     --max-depth 20 --img-path <path> --outdir <outdir>
- ```
-
- ### Reproduce training
-
- Please first prepare the [Hypersim](https://github.com/apple/ml-hypersim) and [Virtual KITTI 2](https://europe.naverlabs.com/research/computer-vision/proxy-virtual-worlds-vkitti-2/) datasets. Then:
-
- ```bash
- bash dist_train.sh
- ```
-
-
- ## Citation
-
- If you find this project useful, please consider citing:
-
- ```bibtex
- @article{depth_anything_v2,
-   title={Depth Anything V2},
-   author={Yang, Lihe and Kang, Bingyi and Huang, Zilong and Zhao, Zhen and Xu, Xiaogang and Feng, Jiashi and Zhao, Hengshuang},
-   journal={arXiv:2406.09414},
-   year={2024}
- }
- ```
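For context on the workflow the deleted README describes, below is a minimal Python sketch of the same metric-depth inference that `run.py` exposed on the command line. It is an illustration only: the `DepthAnythingV2` class name, its constructor arguments, and the `infer_image` helper are assumed from the deleted `depth_anything_v2/dpt.py` (only partially shown in this diff), so treat the exact API as unconfirmed.

```python
# Hypothetical sketch of programmatic inference with the deleted metric-depth code.
# The class name, constructor arguments and infer_image() are assumptions based on
# the deleted depth_anything_v2/dpt.py, not a confirmed API.
import cv2
import torch

from depth_anything_v2.dpt import DepthAnythingV2  # assumed import path

model = DepthAnythingV2(
    encoder='vitl',
    features=256,
    out_channels=[256, 512, 1024, 1024],
    max_depth=20,  # 20 m for the indoor (Hypersim) model, 80 m for the outdoor (VKITTI) one
)
state = torch.load('checkpoints/depth_anything_v2_metric_hypersim_vitl.pth', map_location='cpu')
model.load_state_dict(state)
model.eval()

raw_img = cv2.imread('example.jpg')        # BGR uint8 image, any resolution
depth = model.infer_image(raw_img, 518)    # assumed helper; returns an HxW depth map in meters
```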
metric_depth/assets/compare_zoedepth.png DELETED

Git LFS Details

  • SHA256: 8044e39ef6cb4aaabea9a81333fa1ff2d3e07448e7f9f43f77f471aba72a12e0
  • Pointer size: 132 Bytes
  • Size of remote file: 9.19 MB
metric_depth/dataset/hypersim.py DELETED
@@ -1,74 +0,0 @@
1
- import cv2
2
- import h5py
3
- import numpy as np
4
- import torch
5
- from torch.utils.data import Dataset
6
- from torchvision.transforms import Compose
7
-
8
- from dataset.transform import Resize, NormalizeImage, PrepareForNet, Crop
9
-
10
-
11
- def hypersim_distance_to_depth(npyDistance):
12
- intWidth, intHeight, fltFocal = 1024, 768, 886.81
13
-
14
- npyImageplaneX = np.linspace((-0.5 * intWidth) + 0.5, (0.5 * intWidth) - 0.5, intWidth).reshape(
15
- 1, intWidth).repeat(intHeight, 0).astype(np.float32)[:, :, None]
16
- npyImageplaneY = np.linspace((-0.5 * intHeight) + 0.5, (0.5 * intHeight) - 0.5,
17
- intHeight).reshape(intHeight, 1).repeat(intWidth, 1).astype(np.float32)[:, :, None]
18
- npyImageplaneZ = np.full([intHeight, intWidth, 1], fltFocal, np.float32)
19
- npyImageplane = np.concatenate(
20
- [npyImageplaneX, npyImageplaneY, npyImageplaneZ], 2)
21
-
22
- npyDepth = npyDistance / np.linalg.norm(npyImageplane, 2, 2) * fltFocal
23
- return npyDepth
24
-
25
-
26
- class Hypersim(Dataset):
27
- def __init__(self, filelist_path, mode, size=(518, 518)):
28
-
29
- self.mode = mode
30
- self.size = size
31
-
32
- with open(filelist_path, 'r') as f:
33
- self.filelist = f.read().splitlines()
34
-
35
- net_w, net_h = size
36
- self.transform = Compose([
37
- Resize(
38
- width=net_w,
39
- height=net_h,
40
- resize_target=True if mode == 'train' else False,
41
- keep_aspect_ratio=True,
42
- ensure_multiple_of=14,
43
- resize_method='lower_bound',
44
- image_interpolation_method=cv2.INTER_CUBIC,
45
- ),
46
- NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
47
- PrepareForNet(),
48
- ] + ([Crop(size[0])] if self.mode == 'train' else []))
49
-
50
- def __getitem__(self, item):
51
- img_path = self.filelist[item].split(' ')[0]
52
- depth_path = self.filelist[item].split(' ')[1]
53
-
54
- image = cv2.imread(img_path)
55
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
56
-
57
- depth_fd = h5py.File(depth_path, "r")
58
- distance_meters = np.array(depth_fd['dataset'])
59
- depth = hypersim_distance_to_depth(distance_meters)
60
-
61
- sample = self.transform({'image': image, 'depth': depth})
62
-
63
- sample['image'] = torch.from_numpy(sample['image'])
64
- sample['depth'] = torch.from_numpy(sample['depth'])
65
-
66
- sample['valid_mask'] = (torch.isnan(sample['depth']) == 0)
67
- sample['depth'][sample['valid_mask'] == 0] = 0
68
-
69
- sample['image_path'] = self.filelist[item].split(' ')[0]
70
-
71
- return sample
72
-
73
- def __len__(self):
74
- return len(self.filelist)
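A short note on `hypersim_distance_to_depth` above: Hypersim's HDF5 files store the Euclidean distance from the camera center to each point along the pixel's ray, not planar depth, and the function recovers planar depth by rescaling each ray by the focal length over the length of the corresponding image-plane vector. With pixel coordinates (x, y) measured from the principal point and the focal length f = 886.81 px hard-coded above, the conversion is:

```latex
z(x, y) \;=\; d(x, y) \cdot \frac{f}{\left\lVert (x,\, y,\, f) \right\rVert_2}
\;=\; d(x, y) \cdot \frac{f}{\sqrt{x^2 + y^2 + f^2}}
```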
metric_depth/dataset/kitti.py DELETED
@@ -1,57 +0,0 @@
1
- import cv2
2
- import torch
3
- from torch.utils.data import Dataset
4
- from torchvision.transforms import Compose
5
-
6
- from dataset.transform import Resize, NormalizeImage, PrepareForNet
7
-
8
-
9
- class KITTI(Dataset):
10
- def __init__(self, filelist_path, mode, size=(518, 518)):
11
- if mode != 'val':
12
- raise NotImplementedError
13
-
14
- self.mode = mode
15
- self.size = size
16
-
17
- with open(filelist_path, 'r') as f:
18
- self.filelist = f.read().splitlines()
19
-
20
- net_w, net_h = size
21
- self.transform = Compose([
22
- Resize(
23
- width=net_w,
24
- height=net_h,
25
- resize_target=True if mode == 'train' else False,
26
- keep_aspect_ratio=True,
27
- ensure_multiple_of=14,
28
- resize_method='lower_bound',
29
- image_interpolation_method=cv2.INTER_CUBIC,
30
- ),
31
- NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
32
- PrepareForNet(),
33
- ])
34
-
35
- def __getitem__(self, item):
36
- img_path = self.filelist[item].split(' ')[0]
37
- depth_path = self.filelist[item].split(' ')[1]
38
-
39
- image = cv2.imread(img_path)
40
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
41
-
42
- depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED).astype('float32')
43
-
44
- sample = self.transform({'image': image, 'depth': depth})
45
-
46
- sample['image'] = torch.from_numpy(sample['image'])
47
- sample['depth'] = torch.from_numpy(sample['depth'])
48
- sample['depth'] = sample['depth'] / 256.0 # convert to meters
49
-
50
- sample['valid_mask'] = sample['depth'] > 0
51
-
52
- sample['image_path'] = self.filelist[item].split(' ')[0]
53
-
54
- return sample
55
-
56
- def __len__(self):
57
- return len(self.filelist)
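Regarding the `/ 256.0` in `__getitem__` above: this follows the KITTI depth-benchmark convention of storing depth in 16-bit PNGs scaled by 256, so a stored value of 5120 corresponds to 5120 / 256 = 20.0 m, and 0 marks pixels without a ground-truth measurement, which is why the valid mask is simply `depth > 0`.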
metric_depth/dataset/splits/hypersim/train.txt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:47beb7c615a54d08dfa2f053787897455e845ad1b54d268194a6b431b01a04d0
- size 13694890
metric_depth/dataset/splits/hypersim/val.txt DELETED
The diff for this file is too large to render. See raw diff
 
metric_depth/dataset/splits/kitti/val.txt DELETED
The diff for this file is too large to render. See raw diff
 
metric_depth/dataset/splits/vkitti2/train.txt DELETED
The diff for this file is too large to render. See raw diff
 
metric_depth/dataset/transform.py DELETED
@@ -1,277 +0,0 @@
1
- import cv2
2
- import math
3
- import numpy as np
4
- import torch
5
- import torch.nn.functional as F
6
-
7
-
8
- def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
9
- """Rezise the sample to ensure the given size. Keeps aspect ratio.
10
-
11
- Args:
12
- sample (dict): sample
13
- size (tuple): image size
14
-
15
- Returns:
16
- tuple: new size
17
- """
18
- shape = list(sample["disparity"].shape)
19
-
20
- if shape[0] >= size[0] and shape[1] >= size[1]:
21
- return sample
22
-
23
- scale = [0, 0]
24
- scale[0] = size[0] / shape[0]
25
- scale[1] = size[1] / shape[1]
26
-
27
- scale = max(scale)
28
-
29
- shape[0] = math.ceil(scale * shape[0])
30
- shape[1] = math.ceil(scale * shape[1])
31
-
32
- # resize
33
- sample["image"] = cv2.resize(
34
- sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
35
- )
36
-
37
- sample["disparity"] = cv2.resize(
38
- sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
39
- )
40
- sample["mask"] = cv2.resize(
41
- sample["mask"].astype(np.float32),
42
- tuple(shape[::-1]),
43
- interpolation=cv2.INTER_NEAREST,
44
- )
45
- sample["mask"] = sample["mask"].astype(bool)
46
-
47
- return tuple(shape)
48
-
49
-
50
- class Resize(object):
51
- """Resize sample to given size (width, height).
52
- """
53
-
54
- def __init__(
55
- self,
56
- width,
57
- height,
58
- resize_target=True,
59
- keep_aspect_ratio=False,
60
- ensure_multiple_of=1,
61
- resize_method="lower_bound",
62
- image_interpolation_method=cv2.INTER_AREA,
63
- ):
64
- """Init.
65
-
66
- Args:
67
- width (int): desired output width
68
- height (int): desired output height
69
- resize_target (bool, optional):
70
- True: Resize the full sample (image, mask, target).
71
- False: Resize image only.
72
- Defaults to True.
73
- keep_aspect_ratio (bool, optional):
74
- True: Keep the aspect ratio of the input sample.
75
- Output sample might not have the given width and height, and
76
- resize behaviour depends on the parameter 'resize_method'.
77
- Defaults to False.
78
- ensure_multiple_of (int, optional):
79
- Output width and height is constrained to be multiple of this parameter.
80
- Defaults to 1.
81
- resize_method (str, optional):
82
- "lower_bound": Output will be at least as large as the given size.
83
- "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
84
- "minimal": Scale as least as possible. (Output size might be smaller than given size.)
85
- Defaults to "lower_bound".
86
- """
87
- self.__width = width
88
- self.__height = height
89
-
90
- self.__resize_target = resize_target
91
- self.__keep_aspect_ratio = keep_aspect_ratio
92
- self.__multiple_of = ensure_multiple_of
93
- self.__resize_method = resize_method
94
- self.__image_interpolation_method = image_interpolation_method
95
-
96
- def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
97
- y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
98
-
99
- if max_val is not None and y > max_val:
100
- y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
101
-
102
- if y < min_val:
103
- y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
104
-
105
- return y
106
-
107
- def get_size(self, width, height):
108
- # determine new height and width
109
- scale_height = self.__height / height
110
- scale_width = self.__width / width
111
-
112
- if self.__keep_aspect_ratio:
113
- if self.__resize_method == "lower_bound":
114
- # scale such that output size is lower bound
115
- if scale_width > scale_height:
116
- # fit width
117
- scale_height = scale_width
118
- else:
119
- # fit height
120
- scale_width = scale_height
121
- elif self.__resize_method == "upper_bound":
122
- # scale such that output size is upper bound
123
- if scale_width < scale_height:
124
- # fit width
125
- scale_height = scale_width
126
- else:
127
- # fit height
128
- scale_width = scale_height
129
- elif self.__resize_method == "minimal":
130
- # scale as little as possible
131
- if abs(1 - scale_width) < abs(1 - scale_height):
132
- # fit width
133
- scale_height = scale_width
134
- else:
135
- # fit height
136
- scale_width = scale_height
137
- else:
138
- raise ValueError(
139
- f"resize_method {self.__resize_method} not implemented"
140
- )
141
-
142
- if self.__resize_method == "lower_bound":
143
- new_height = self.constrain_to_multiple_of(
144
- scale_height * height, min_val=self.__height
145
- )
146
- new_width = self.constrain_to_multiple_of(
147
- scale_width * width, min_val=self.__width
148
- )
149
- elif self.__resize_method == "upper_bound":
150
- new_height = self.constrain_to_multiple_of(
151
- scale_height * height, max_val=self.__height
152
- )
153
- new_width = self.constrain_to_multiple_of(
154
- scale_width * width, max_val=self.__width
155
- )
156
- elif self.__resize_method == "minimal":
157
- new_height = self.constrain_to_multiple_of(scale_height * height)
158
- new_width = self.constrain_to_multiple_of(scale_width * width)
159
- else:
160
- raise ValueError(f"resize_method {self.__resize_method} not implemented")
161
-
162
- return (new_width, new_height)
163
-
164
- def __call__(self, sample):
165
- width, height = self.get_size(
166
- sample["image"].shape[1], sample["image"].shape[0]
167
- )
168
-
169
- # resize sample
170
- sample["image"] = cv2.resize(
171
- sample["image"],
172
- (width, height),
173
- interpolation=self.__image_interpolation_method,
174
- )
175
-
176
- if self.__resize_target:
177
- if "disparity" in sample:
178
- sample["disparity"] = cv2.resize(
179
- sample["disparity"],
180
- (width, height),
181
- interpolation=cv2.INTER_NEAREST,
182
- )
183
-
184
- if "depth" in sample:
185
- sample["depth"] = cv2.resize(
186
- sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
187
- )
188
-
189
- if "semseg_mask" in sample:
190
- # sample["semseg_mask"] = cv2.resize(
191
- # sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST
192
- # )
193
- sample["semseg_mask"] = F.interpolate(torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...], (height, width), mode='nearest').numpy()[0, 0]
194
-
195
- if "mask" in sample:
196
- sample["mask"] = cv2.resize(
197
- sample["mask"].astype(np.float32),
198
- (width, height),
199
- interpolation=cv2.INTER_NEAREST,
200
- )
201
- # sample["mask"] = sample["mask"].astype(bool)
202
-
203
- # print(sample['image'].shape, sample['depth'].shape)
204
- return sample
205
-
206
-
207
- class NormalizeImage(object):
208
- """Normlize image by given mean and std.
209
- """
210
-
211
- def __init__(self, mean, std):
212
- self.__mean = mean
213
- self.__std = std
214
-
215
- def __call__(self, sample):
216
- sample["image"] = (sample["image"] - self.__mean) / self.__std
217
-
218
- return sample
219
-
220
-
221
- class PrepareForNet(object):
222
- """Prepare sample for usage as network input.
223
- """
224
-
225
- def __init__(self):
226
- pass
227
-
228
- def __call__(self, sample):
229
- image = np.transpose(sample["image"], (2, 0, 1))
230
- sample["image"] = np.ascontiguousarray(image).astype(np.float32)
231
-
232
- if "mask" in sample:
233
- sample["mask"] = sample["mask"].astype(np.float32)
234
- sample["mask"] = np.ascontiguousarray(sample["mask"])
235
-
236
- if "depth" in sample:
237
- depth = sample["depth"].astype(np.float32)
238
- sample["depth"] = np.ascontiguousarray(depth)
239
-
240
- if "semseg_mask" in sample:
241
- sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32)
242
- sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"])
243
-
244
- return sample
245
-
246
-
247
- class Crop(object):
248
- """Crop sample for batch-wise training. Image is of shape CxHxW
249
- """
250
-
251
- def __init__(self, size):
252
- if isinstance(size, int):
253
- self.size = (size, size)
254
- else:
255
- self.size = size
256
-
257
- def __call__(self, sample):
258
- h, w = sample['image'].shape[-2:]
259
- assert h >= self.size[0] and w >= self.size[1], 'Wrong size'
260
-
261
- h_start = np.random.randint(0, h - self.size[0] + 1)
262
- w_start = np.random.randint(0, w - self.size[1] + 1)
263
- h_end = h_start + self.size[0]
264
- w_end = w_start + self.size[1]
265
-
266
- sample['image'] = sample['image'][:, h_start: h_end, w_start: w_end]
267
-
268
- if "depth" in sample:
269
- sample["depth"] = sample["depth"][h_start: h_end, w_start: w_end]
270
-
271
- if "mask" in sample:
272
- sample["mask"] = sample["mask"][h_start: h_end, w_start: w_end]
273
-
274
- if "semseg_mask" in sample:
275
- sample["semseg_mask"] = sample["semseg_mask"][h_start: h_end, w_start: w_end]
276
-
277
- return sample
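To make the `lower_bound` resizing policy used by all three datasets concrete, here is a small self-contained sketch that re-implements `Resize.get_size` for that case (target 518×518, `keep_aspect_ratio=True`, `ensure_multiple_of=14`). The helper is illustrative only and is not part of the deleted code.

```python
import math

def lower_bound_size(width, height, target=518, multiple_of=14):
    """Mimic Resize.get_size for resize_method='lower_bound' with keep_aspect_ratio=True."""
    # scale so that the more constrained side reaches the target (output >= target on both sides)
    scale = max(target / width, target / height)

    def to_multiple(x, min_val):
        y = round(x / multiple_of) * multiple_of
        if y < min_val:  # lower_bound: never round below the target size
            y = math.ceil(x / multiple_of) * multiple_of
        return int(y)

    return to_multiple(scale * width, target), to_multiple(scale * height, target)

# A 1242x375 KITTI frame: the height is the limiting side and is scaled to 518,
# while the width becomes round(1242 * 518 / 375 / 14) * 14 = 1722.
print(lower_bound_size(1242, 375))  # -> (1722, 518)
```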
metric_depth/dataset/vkitti2.py DELETED
@@ -1,54 +0,0 @@
1
- import cv2
2
- import torch
3
- from torch.utils.data import Dataset
4
- from torchvision.transforms import Compose
5
-
6
- from dataset.transform import Resize, NormalizeImage, PrepareForNet, Crop
7
-
8
-
9
- class VKITTI2(Dataset):
10
- def __init__(self, filelist_path, mode, size=(518, 518)):
11
-
12
- self.mode = mode
13
- self.size = size
14
-
15
- with open(filelist_path, 'r') as f:
16
- self.filelist = f.read().splitlines()
17
-
18
- net_w, net_h = size
19
- self.transform = Compose([
20
- Resize(
21
- width=net_w,
22
- height=net_h,
23
- resize_target=True if mode == 'train' else False,
24
- keep_aspect_ratio=True,
25
- ensure_multiple_of=14,
26
- resize_method='lower_bound',
27
- image_interpolation_method=cv2.INTER_CUBIC,
28
- ),
29
- NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
30
- PrepareForNet(),
31
- ] + ([Crop(size[0])] if self.mode == 'train' else []))
32
-
33
- def __getitem__(self, item):
34
- img_path = self.filelist[item].split(' ')[0]
35
- depth_path = self.filelist[item].split(' ')[1]
36
-
37
- image = cv2.imread(img_path)
38
- image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
39
-
40
- depth = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH) / 100.0 # cm to m
41
-
42
- sample = self.transform({'image': image, 'depth': depth})
43
-
44
- sample['image'] = torch.from_numpy(sample['image'])
45
- sample['depth'] = torch.from_numpy(sample['depth'])
46
-
47
- sample['valid_mask'] = (sample['depth'] <= 80)
48
-
49
- sample['image_path'] = self.filelist[item].split(' ')[0]
50
-
51
- return sample
52
-
53
- def __len__(self):
54
- return len(self.filelist)
metric_depth/depth_anything_v2/dinov2.py DELETED
@@ -1,415 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- #
3
- # This source code is licensed under the Apache License, Version 2.0
4
- # found in the LICENSE file in the root directory of this source tree.
5
-
6
- # References:
7
- # https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
8
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
9
-
10
- from functools import partial
11
- import math
12
- import logging
13
- from typing import Sequence, Tuple, Union, Callable
14
-
15
- import torch
16
- import torch.nn as nn
17
- import torch.utils.checkpoint
18
- from torch.nn.init import trunc_normal_
19
-
20
- from .dinov2_layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block
21
-
22
-
23
- logger = logging.getLogger("dinov2")
24
-
25
-
26
- def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
27
- if not depth_first and include_root:
28
- fn(module=module, name=name)
29
- for child_name, child_module in module.named_children():
30
- child_name = ".".join((name, child_name)) if name else child_name
31
- named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
32
- if depth_first and include_root:
33
- fn(module=module, name=name)
34
- return module
35
-
36
-
37
- class BlockChunk(nn.ModuleList):
38
- def forward(self, x):
39
- for b in self:
40
- x = b(x)
41
- return x
42
-
43
-
44
- class DinoVisionTransformer(nn.Module):
45
- def __init__(
46
- self,
47
- img_size=224,
48
- patch_size=16,
49
- in_chans=3,
50
- embed_dim=768,
51
- depth=12,
52
- num_heads=12,
53
- mlp_ratio=4.0,
54
- qkv_bias=True,
55
- ffn_bias=True,
56
- proj_bias=True,
57
- drop_path_rate=0.0,
58
- drop_path_uniform=False,
59
- init_values=None, # for layerscale: None or 0 => no layerscale
60
- embed_layer=PatchEmbed,
61
- act_layer=nn.GELU,
62
- block_fn=Block,
63
- ffn_layer="mlp",
64
- block_chunks=1,
65
- num_register_tokens=0,
66
- interpolate_antialias=False,
67
- interpolate_offset=0.1,
68
- ):
69
- """
70
- Args:
71
- img_size (int, tuple): input image size
72
- patch_size (int, tuple): patch size
73
- in_chans (int): number of input channels
74
- embed_dim (int): embedding dimension
75
- depth (int): depth of transformer
76
- num_heads (int): number of attention heads
77
- mlp_ratio (int): ratio of mlp hidden dim to embedding dim
78
- qkv_bias (bool): enable bias for qkv if True
79
- proj_bias (bool): enable bias for proj in attn if True
80
- ffn_bias (bool): enable bias for ffn if True
81
- drop_path_rate (float): stochastic depth rate
82
- drop_path_uniform (bool): apply uniform drop rate across blocks
83
- weight_init (str): weight init scheme
84
- init_values (float): layer-scale init values
85
- embed_layer (nn.Module): patch embedding layer
86
- act_layer (nn.Module): MLP activation layer
87
- block_fn (nn.Module): transformer block class
88
- ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
89
- block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
90
- num_register_tokens: (int) number of extra cls tokens (so-called "registers")
91
- interpolate_antialias: (str) flag to apply anti-aliasing when interpolating positional embeddings
92
- interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings
93
- """
94
- super().__init__()
95
- norm_layer = partial(nn.LayerNorm, eps=1e-6)
96
-
97
- self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
98
- self.num_tokens = 1
99
- self.n_blocks = depth
100
- self.num_heads = num_heads
101
- self.patch_size = patch_size
102
- self.num_register_tokens = num_register_tokens
103
- self.interpolate_antialias = interpolate_antialias
104
- self.interpolate_offset = interpolate_offset
105
-
106
- self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
107
- num_patches = self.patch_embed.num_patches
108
-
109
- self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
110
- self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
111
- assert num_register_tokens >= 0
112
- self.register_tokens = (
113
- nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
114
- )
115
-
116
- if drop_path_uniform is True:
117
- dpr = [drop_path_rate] * depth
118
- else:
119
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
120
-
121
- if ffn_layer == "mlp":
122
- logger.info("using MLP layer as FFN")
123
- ffn_layer = Mlp
124
- elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
125
- logger.info("using SwiGLU layer as FFN")
126
- ffn_layer = SwiGLUFFNFused
127
- elif ffn_layer == "identity":
128
- logger.info("using Identity layer as FFN")
129
-
130
- def f(*args, **kwargs):
131
- return nn.Identity()
132
-
133
- ffn_layer = f
134
- else:
135
- raise NotImplementedError
136
-
137
- blocks_list = [
138
- block_fn(
139
- dim=embed_dim,
140
- num_heads=num_heads,
141
- mlp_ratio=mlp_ratio,
142
- qkv_bias=qkv_bias,
143
- proj_bias=proj_bias,
144
- ffn_bias=ffn_bias,
145
- drop_path=dpr[i],
146
- norm_layer=norm_layer,
147
- act_layer=act_layer,
148
- ffn_layer=ffn_layer,
149
- init_values=init_values,
150
- )
151
- for i in range(depth)
152
- ]
153
- if block_chunks > 0:
154
- self.chunked_blocks = True
155
- chunked_blocks = []
156
- chunksize = depth // block_chunks
157
- for i in range(0, depth, chunksize):
158
- # this is to keep the block index consistent if we chunk the block list
159
- chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
160
- self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
161
- else:
162
- self.chunked_blocks = False
163
- self.blocks = nn.ModuleList(blocks_list)
164
-
165
- self.norm = norm_layer(embed_dim)
166
- self.head = nn.Identity()
167
-
168
- self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))
169
-
170
- self.init_weights()
171
-
172
- def init_weights(self):
173
- trunc_normal_(self.pos_embed, std=0.02)
174
- nn.init.normal_(self.cls_token, std=1e-6)
175
- if self.register_tokens is not None:
176
- nn.init.normal_(self.register_tokens, std=1e-6)
177
- named_apply(init_weights_vit_timm, self)
178
-
179
- def interpolate_pos_encoding(self, x, w, h):
180
- previous_dtype = x.dtype
181
- npatch = x.shape[1] - 1
182
- N = self.pos_embed.shape[1] - 1
183
- if npatch == N and w == h:
184
- return self.pos_embed
185
- pos_embed = self.pos_embed.float()
186
- class_pos_embed = pos_embed[:, 0]
187
- patch_pos_embed = pos_embed[:, 1:]
188
- dim = x.shape[-1]
189
- w0 = w // self.patch_size
190
- h0 = h // self.patch_size
191
- # we add a small number to avoid floating point error in the interpolation
192
- # see discussion at https://github.com/facebookresearch/dino/issues/8
193
- # DINOv2 with register modify the interpolate_offset from 0.1 to 0.0
194
- w0, h0 = w0 + self.interpolate_offset, h0 + self.interpolate_offset
195
- # w0, h0 = w0 + 0.1, h0 + 0.1
196
-
197
- sqrt_N = math.sqrt(N)
198
- sx, sy = float(w0) / sqrt_N, float(h0) / sqrt_N
199
- patch_pos_embed = nn.functional.interpolate(
200
- patch_pos_embed.reshape(1, int(sqrt_N), int(sqrt_N), dim).permute(0, 3, 1, 2),
201
- scale_factor=(sx, sy),
202
- # (int(w0), int(h0)), # to solve the upsampling shape issue
203
- mode="bicubic",
204
- antialias=self.interpolate_antialias
205
- )
206
-
207
- assert int(w0) == patch_pos_embed.shape[-2]
208
- assert int(h0) == patch_pos_embed.shape[-1]
209
- patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
210
- return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)
211
-
212
- def prepare_tokens_with_masks(self, x, masks=None):
213
- B, nc, w, h = x.shape
214
- x = self.patch_embed(x)
215
- if masks is not None:
216
- x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)
217
-
218
- x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
219
- x = x + self.interpolate_pos_encoding(x, w, h)
220
-
221
- if self.register_tokens is not None:
222
- x = torch.cat(
223
- (
224
- x[:, :1],
225
- self.register_tokens.expand(x.shape[0], -1, -1),
226
- x[:, 1:],
227
- ),
228
- dim=1,
229
- )
230
-
231
- return x
232
-
233
- def forward_features_list(self, x_list, masks_list):
234
- x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
235
- for blk in self.blocks:
236
- x = blk(x)
237
-
238
- all_x = x
239
- output = []
240
- for x, masks in zip(all_x, masks_list):
241
- x_norm = self.norm(x)
242
- output.append(
243
- {
244
- "x_norm_clstoken": x_norm[:, 0],
245
- "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1],
246
- "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :],
247
- "x_prenorm": x,
248
- "masks": masks,
249
- }
250
- )
251
- return output
252
-
253
- def forward_features(self, x, masks=None):
254
- if isinstance(x, list):
255
- return self.forward_features_list(x, masks)
256
-
257
- x = self.prepare_tokens_with_masks(x, masks)
258
-
259
- for blk in self.blocks:
260
- x = blk(x)
261
-
262
- x_norm = self.norm(x)
263
- return {
264
- "x_norm_clstoken": x_norm[:, 0],
265
- "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1],
266
- "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :],
267
- "x_prenorm": x,
268
- "masks": masks,
269
- }
270
-
271
- def _get_intermediate_layers_not_chunked(self, x, n=1):
272
- x = self.prepare_tokens_with_masks(x)
273
- # If n is an int, take the n last blocks. If it's a list, take them
274
- output, total_block_len = [], len(self.blocks)
275
- blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
276
- for i, blk in enumerate(self.blocks):
277
- x = blk(x)
278
- if i in blocks_to_take:
279
- output.append(x)
280
- assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
281
- return output
282
-
283
- def _get_intermediate_layers_chunked(self, x, n=1):
284
- x = self.prepare_tokens_with_masks(x)
285
- output, i, total_block_len = [], 0, len(self.blocks[-1])
286
- # If n is an int, take the n last blocks. If it's a list, take them
287
- blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
288
- for block_chunk in self.blocks:
289
- for blk in block_chunk[i:]: # Passing the nn.Identity()
290
- x = blk(x)
291
- if i in blocks_to_take:
292
- output.append(x)
293
- i += 1
294
- assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
295
- return output
296
-
297
- def get_intermediate_layers(
298
- self,
299
- x: torch.Tensor,
300
- n: Union[int, Sequence] = 1, # Layers or n last layers to take
301
- reshape: bool = False,
302
- return_class_token: bool = False,
303
- norm=True
304
- ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
305
- if self.chunked_blocks:
306
- outputs = self._get_intermediate_layers_chunked(x, n)
307
- else:
308
- outputs = self._get_intermediate_layers_not_chunked(x, n)
309
- if norm:
310
- outputs = [self.norm(out) for out in outputs]
311
- class_tokens = [out[:, 0] for out in outputs]
312
- outputs = [out[:, 1 + self.num_register_tokens:] for out in outputs]
313
- if reshape:
314
- B, _, w, h = x.shape
315
- outputs = [
316
- out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
317
- for out in outputs
318
- ]
319
- if return_class_token:
320
- return tuple(zip(outputs, class_tokens))
321
- return tuple(outputs)
322
-
323
- def forward(self, *args, is_training=False, **kwargs):
324
- ret = self.forward_features(*args, **kwargs)
325
- if is_training:
326
- return ret
327
- else:
328
- return self.head(ret["x_norm_clstoken"])
329
-
330
-
331
- def init_weights_vit_timm(module: nn.Module, name: str = ""):
332
- """ViT weight initialization, original timm impl (for reproducibility)"""
333
- if isinstance(module, nn.Linear):
334
- trunc_normal_(module.weight, std=0.02)
335
- if module.bias is not None:
336
- nn.init.zeros_(module.bias)
337
-
338
-
339
- def vit_small(patch_size=16, num_register_tokens=0, **kwargs):
340
- model = DinoVisionTransformer(
341
- patch_size=patch_size,
342
- embed_dim=384,
343
- depth=12,
344
- num_heads=6,
345
- mlp_ratio=4,
346
- block_fn=partial(Block, attn_class=MemEffAttention),
347
- num_register_tokens=num_register_tokens,
348
- **kwargs,
349
- )
350
- return model
351
-
352
-
353
- def vit_base(patch_size=16, num_register_tokens=0, **kwargs):
354
- model = DinoVisionTransformer(
355
- patch_size=patch_size,
356
- embed_dim=768,
357
- depth=12,
358
- num_heads=12,
359
- mlp_ratio=4,
360
- block_fn=partial(Block, attn_class=MemEffAttention),
361
- num_register_tokens=num_register_tokens,
362
- **kwargs,
363
- )
364
- return model
365
-
366
-
367
- def vit_large(patch_size=16, num_register_tokens=0, **kwargs):
368
- model = DinoVisionTransformer(
369
- patch_size=patch_size,
370
- embed_dim=1024,
371
- depth=24,
372
- num_heads=16,
373
- mlp_ratio=4,
374
- block_fn=partial(Block, attn_class=MemEffAttention),
375
- num_register_tokens=num_register_tokens,
376
- **kwargs,
377
- )
378
- return model
379
-
380
-
381
- def vit_giant2(patch_size=16, num_register_tokens=0, **kwargs):
382
- """
383
- Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64
384
- """
385
- model = DinoVisionTransformer(
386
- patch_size=patch_size,
387
- embed_dim=1536,
388
- depth=40,
389
- num_heads=24,
390
- mlp_ratio=4,
391
- block_fn=partial(Block, attn_class=MemEffAttention),
392
- num_register_tokens=num_register_tokens,
393
- **kwargs,
394
- )
395
- return model
396
-
397
-
398
- def DINOv2(model_name):
399
- model_zoo = {
400
- "vits": vit_small,
401
- "vitb": vit_base,
402
- "vitl": vit_large,
403
- "vitg": vit_giant2
404
- }
405
-
406
- return model_zoo[model_name](
407
- img_size=518,
408
- patch_size=14,
409
- init_values=1.0,
410
- ffn_layer="mlp" if model_name != "vitg" else "swiglufused",
411
- block_chunks=0,
412
- num_register_tokens=0,
413
- interpolate_antialias=False,
414
- interpolate_offset=0.1
415
- )
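For reference, a minimal sketch of how the `DINOv2` factory above is typically used as a backbone: build an encoder, then pull several intermediate feature maps with `get_intermediate_layers`, which is what a DPT-style head consumes. The layer indices below are illustrative; the actual selection lived in the (also deleted) `dpt.py`.

```python
import torch

from depth_anything_v2.dinov2 import DINOv2  # module layout as deleted in this commit

encoder = DINOv2('vitl')  # ViT-L/14: embed_dim 1024, patch size 14, 518x518 pretraining size
encoder.eval()

# input sides must be multiples of the patch size (14); 518 = 37 * 14
x = torch.randn(1, 3, 518, 518)

with torch.no_grad():
    # illustrative layer indices; reshape=True yields (B, C, H/14, W/14) feature maps,
    # and return_class_token=True pairs each map with its class token
    feats = encoder.get_intermediate_layers(
        x, n=[4, 11, 17, 23], reshape=True, return_class_token=True)

for fmap, cls_tok in feats:
    print(fmap.shape, cls_tok.shape)  # torch.Size([1, 1024, 37, 37]) torch.Size([1, 1024])
```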
metric_depth/depth_anything_v2/dinov2_layers/__init__.py DELETED
@@ -1,11 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from .mlp import Mlp
8
- from .patch_embed import PatchEmbed
9
- from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused
10
- from .block import NestedTensorBlock
11
- from .attention import MemEffAttention
metric_depth/depth_anything_v2/dinov2_layers/attention.py DELETED
@@ -1,83 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # References:
8
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
9
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
10
-
11
- import logging
12
-
13
- from torch import Tensor
14
- from torch import nn
15
-
16
-
17
- logger = logging.getLogger("dinov2")
18
-
19
-
20
- try:
21
- from xformers.ops import memory_efficient_attention, unbind, fmha
22
-
23
- XFORMERS_AVAILABLE = True
24
- except ImportError:
25
- logger.warning("xFormers not available")
26
- XFORMERS_AVAILABLE = False
27
-
28
-
29
- class Attention(nn.Module):
30
- def __init__(
31
- self,
32
- dim: int,
33
- num_heads: int = 8,
34
- qkv_bias: bool = False,
35
- proj_bias: bool = True,
36
- attn_drop: float = 0.0,
37
- proj_drop: float = 0.0,
38
- ) -> None:
39
- super().__init__()
40
- self.num_heads = num_heads
41
- head_dim = dim // num_heads
42
- self.scale = head_dim**-0.5
43
-
44
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
45
- self.attn_drop = nn.Dropout(attn_drop)
46
- self.proj = nn.Linear(dim, dim, bias=proj_bias)
47
- self.proj_drop = nn.Dropout(proj_drop)
48
-
49
- def forward(self, x: Tensor) -> Tensor:
50
- B, N, C = x.shape
51
- qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
52
-
53
- q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
54
- attn = q @ k.transpose(-2, -1)
55
-
56
- attn = attn.softmax(dim=-1)
57
- attn = self.attn_drop(attn)
58
-
59
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
60
- x = self.proj(x)
61
- x = self.proj_drop(x)
62
- return x
63
-
64
-
65
- class MemEffAttention(Attention):
66
- def forward(self, x: Tensor, attn_bias=None) -> Tensor:
67
- if not XFORMERS_AVAILABLE:
68
- assert attn_bias is None, "xFormers is required for nested tensors usage"
69
- return super().forward(x)
70
-
71
- B, N, C = x.shape
72
- qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
73
-
74
- q, k, v = unbind(qkv, 2)
75
-
76
- x = memory_efficient_attention(q, k, v, attn_bias=attn_bias)
77
- x = x.reshape([B, N, C])
78
-
79
- x = self.proj(x)
80
- x = self.proj_drop(x)
81
- return x
82
-
83
-
metric_depth/depth_anything_v2/dinov2_layers/block.py DELETED
@@ -1,252 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # References:
8
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
9
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
10
-
11
- import logging
12
- from typing import Callable, List, Any, Tuple, Dict
13
-
14
- import torch
15
- from torch import nn, Tensor
16
-
17
- from .attention import Attention, MemEffAttention
18
- from .drop_path import DropPath
19
- from .layer_scale import LayerScale
20
- from .mlp import Mlp
21
-
22
-
23
- logger = logging.getLogger("dinov2")
24
-
25
-
26
- try:
27
- from xformers.ops import fmha
28
- from xformers.ops import scaled_index_add, index_select_cat
29
-
30
- XFORMERS_AVAILABLE = True
31
- except ImportError:
32
- logger.warning("xFormers not available")
33
- XFORMERS_AVAILABLE = False
34
-
35
-
36
- class Block(nn.Module):
37
- def __init__(
38
- self,
39
- dim: int,
40
- num_heads: int,
41
- mlp_ratio: float = 4.0,
42
- qkv_bias: bool = False,
43
- proj_bias: bool = True,
44
- ffn_bias: bool = True,
45
- drop: float = 0.0,
46
- attn_drop: float = 0.0,
47
- init_values=None,
48
- drop_path: float = 0.0,
49
- act_layer: Callable[..., nn.Module] = nn.GELU,
50
- norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
51
- attn_class: Callable[..., nn.Module] = Attention,
52
- ffn_layer: Callable[..., nn.Module] = Mlp,
53
- ) -> None:
54
- super().__init__()
55
- # print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}")
56
- self.norm1 = norm_layer(dim)
57
- self.attn = attn_class(
58
- dim,
59
- num_heads=num_heads,
60
- qkv_bias=qkv_bias,
61
- proj_bias=proj_bias,
62
- attn_drop=attn_drop,
63
- proj_drop=drop,
64
- )
65
- self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
66
- self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
67
-
68
- self.norm2 = norm_layer(dim)
69
- mlp_hidden_dim = int(dim * mlp_ratio)
70
- self.mlp = ffn_layer(
71
- in_features=dim,
72
- hidden_features=mlp_hidden_dim,
73
- act_layer=act_layer,
74
- drop=drop,
75
- bias=ffn_bias,
76
- )
77
- self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
78
- self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
79
-
80
- self.sample_drop_ratio = drop_path
81
-
82
- def forward(self, x: Tensor) -> Tensor:
83
- def attn_residual_func(x: Tensor) -> Tensor:
84
- return self.ls1(self.attn(self.norm1(x)))
85
-
86
- def ffn_residual_func(x: Tensor) -> Tensor:
87
- return self.ls2(self.mlp(self.norm2(x)))
88
-
89
- if self.training and self.sample_drop_ratio > 0.1:
90
- # the overhead is compensated only for a drop path rate larger than 0.1
91
- x = drop_add_residual_stochastic_depth(
92
- x,
93
- residual_func=attn_residual_func,
94
- sample_drop_ratio=self.sample_drop_ratio,
95
- )
96
- x = drop_add_residual_stochastic_depth(
97
- x,
98
- residual_func=ffn_residual_func,
99
- sample_drop_ratio=self.sample_drop_ratio,
100
- )
101
- elif self.training and self.sample_drop_ratio > 0.0:
102
- x = x + self.drop_path1(attn_residual_func(x))
103
- x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2
104
- else:
105
- x = x + attn_residual_func(x)
106
- x = x + ffn_residual_func(x)
107
- return x
108
-
109
-
110
- def drop_add_residual_stochastic_depth(
111
- x: Tensor,
112
- residual_func: Callable[[Tensor], Tensor],
113
- sample_drop_ratio: float = 0.0,
114
- ) -> Tensor:
115
- # 1) extract subset using permutation
116
- b, n, d = x.shape
117
- sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
118
- brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
119
- x_subset = x[brange]
120
-
121
- # 2) apply residual_func to get residual
122
- residual = residual_func(x_subset)
123
-
124
- x_flat = x.flatten(1)
125
- residual = residual.flatten(1)
126
-
127
- residual_scale_factor = b / sample_subset_size
128
-
129
- # 3) add the residual
130
- x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
131
- return x_plus_residual.view_as(x)
132
-
133
-
134
- def get_branges_scales(x, sample_drop_ratio=0.0):
135
- b, n, d = x.shape
136
- sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
137
- brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
138
- residual_scale_factor = b / sample_subset_size
139
- return brange, residual_scale_factor
140
-
141
-
142
- def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
143
- if scaling_vector is None:
144
- x_flat = x.flatten(1)
145
- residual = residual.flatten(1)
146
- x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
147
- else:
148
- x_plus_residual = scaled_index_add(
149
- x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
150
- )
151
- return x_plus_residual
152
-
153
-
154
- attn_bias_cache: Dict[Tuple, Any] = {}
155
-
156
-
157
- def get_attn_bias_and_cat(x_list, branges=None):
158
- """
159
- this will perform the index select, cat the tensors, and provide the attn_bias from cache
160
- """
161
- batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
162
- all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
163
- if all_shapes not in attn_bias_cache.keys():
164
- seqlens = []
165
- for b, x in zip(batch_sizes, x_list):
166
- for _ in range(b):
167
- seqlens.append(x.shape[1])
168
- attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
169
- attn_bias._batch_sizes = batch_sizes
170
- attn_bias_cache[all_shapes] = attn_bias
171
-
172
- if branges is not None:
173
- cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
174
- else:
175
- tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
176
- cat_tensors = torch.cat(tensors_bs1, dim=1)
177
-
178
- return attn_bias_cache[all_shapes], cat_tensors
179
-
180
-
181
- def drop_add_residual_stochastic_depth_list(
182
- x_list: List[Tensor],
183
- residual_func: Callable[[Tensor, Any], Tensor],
184
- sample_drop_ratio: float = 0.0,
185
- scaling_vector=None,
186
- ) -> Tensor:
187
- # 1) generate random set of indices for dropping samples in the batch
188
- branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
189
- branges = [s[0] for s in branges_scales]
190
- residual_scale_factors = [s[1] for s in branges_scales]
191
-
192
- # 2) get attention bias and index+concat the tensors
193
- attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)
194
-
195
- # 3) apply residual_func to get residual, and split the result
196
- residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore
197
-
198
- outputs = []
199
- for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
200
- outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
201
- return outputs
202
-
203
-
204
- class NestedTensorBlock(Block):
205
- def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
206
- """
207
- x_list contains a list of tensors to nest together and run
208
- """
209
- assert isinstance(self.attn, MemEffAttention)
210
-
211
- if self.training and self.sample_drop_ratio > 0.0:
212
-
213
- def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
214
- return self.attn(self.norm1(x), attn_bias=attn_bias)
215
-
216
- def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
217
- return self.mlp(self.norm2(x))
218
-
219
- x_list = drop_add_residual_stochastic_depth_list(
220
- x_list,
221
- residual_func=attn_residual_func,
222
- sample_drop_ratio=self.sample_drop_ratio,
223
- scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None,
224
- )
225
- x_list = drop_add_residual_stochastic_depth_list(
226
- x_list,
227
- residual_func=ffn_residual_func,
228
- sample_drop_ratio=self.sample_drop_ratio,
229
- scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None,
230
- )
231
- return x_list
232
- else:
233
-
234
- def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
235
- return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias))
236
-
237
- def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
238
- return self.ls2(self.mlp(self.norm2(x)))
239
-
240
- attn_bias, x = get_attn_bias_and_cat(x_list)
241
- x = x + attn_residual_func(x, attn_bias=attn_bias)
242
- x = x + ffn_residual_func(x)
243
- return attn_bias.split(x)
244
-
245
- def forward(self, x_or_x_list):
246
- if isinstance(x_or_x_list, Tensor):
247
- return super().forward(x_or_x_list)
248
- elif isinstance(x_or_x_list, list):
249
- assert XFORMERS_AVAILABLE, "Please install xFormers for nested tensors usage"
250
- return self.forward_nested(x_or_x_list)
251
- else:
252
- raise AssertionError
metric_depth/depth_anything_v2/dinov2_layers/drop_path.py DELETED
@@ -1,35 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # References:
8
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
9
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/drop.py
10
-
11
-
12
- from torch import nn
13
-
14
-
15
- def drop_path(x, drop_prob: float = 0.0, training: bool = False):
16
- if drop_prob == 0.0 or not training:
17
- return x
18
- keep_prob = 1 - drop_prob
19
- shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
20
- random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
21
- if keep_prob > 0.0:
22
- random_tensor.div_(keep_prob)
23
- output = x * random_tensor
24
- return output
25
-
26
-
27
- class DropPath(nn.Module):
28
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
29
-
30
- def __init__(self, drop_prob=None):
31
- super(DropPath, self).__init__()
32
- self.drop_prob = drop_prob
33
-
34
- def forward(self, x):
35
- return drop_path(x, self.drop_prob, self.training)
metric_depth/depth_anything_v2/dinov2_layers/layer_scale.py DELETED
@@ -1,28 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # Modified from: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/vision_transformer.py#L103-L110
8
-
9
- from typing import Union
10
-
11
- import torch
12
- from torch import Tensor
13
- from torch import nn
14
-
15
-
16
- class LayerScale(nn.Module):
17
- def __init__(
18
- self,
19
- dim: int,
20
- init_values: Union[float, Tensor] = 1e-5,
21
- inplace: bool = False,
22
- ) -> None:
23
- super().__init__()
24
- self.inplace = inplace
25
- self.gamma = nn.Parameter(init_values * torch.ones(dim))
26
-
27
- def forward(self, x: Tensor) -> Tensor:
28
- return x.mul_(self.gamma) if self.inplace else x * self.gamma
metric_depth/depth_anything_v2/dinov2_layers/mlp.py DELETED
@@ -1,41 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # References:
8
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
9
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/mlp.py
10
-
11
-
12
- from typing import Callable, Optional
13
-
14
- from torch import Tensor, nn
15
-
16
-
17
- class Mlp(nn.Module):
18
- def __init__(
19
- self,
20
- in_features: int,
21
- hidden_features: Optional[int] = None,
22
- out_features: Optional[int] = None,
23
- act_layer: Callable[..., nn.Module] = nn.GELU,
24
- drop: float = 0.0,
25
- bias: bool = True,
26
- ) -> None:
27
- super().__init__()
28
- out_features = out_features or in_features
29
- hidden_features = hidden_features or in_features
30
- self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
31
- self.act = act_layer()
32
- self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
33
- self.drop = nn.Dropout(drop)
34
-
35
- def forward(self, x: Tensor) -> Tensor:
36
- x = self.fc1(x)
37
- x = self.act(x)
38
- x = self.drop(x)
39
- x = self.fc2(x)
40
- x = self.drop(x)
41
- return x
metric_depth/depth_anything_v2/dinov2_layers/patch_embed.py DELETED
@@ -1,89 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # References:
8
- # https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
9
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
10
-
11
- from typing import Callable, Optional, Tuple, Union
12
-
13
- from torch import Tensor
14
- import torch.nn as nn
15
-
16
-
17
- def make_2tuple(x):
18
- if isinstance(x, tuple):
19
- assert len(x) == 2
20
- return x
21
-
22
- assert isinstance(x, int)
23
- return (x, x)
24
-
25
-
26
- class PatchEmbed(nn.Module):
27
- """
28
- 2D image to patch embedding: (B,C,H,W) -> (B,N,D)
29
-
30
- Args:
31
- img_size: Image size.
32
- patch_size: Patch token size.
33
- in_chans: Number of input image channels.
34
- embed_dim: Number of linear projection output channels.
35
- norm_layer: Normalization layer.
36
- """
37
-
38
- def __init__(
39
- self,
40
- img_size: Union[int, Tuple[int, int]] = 224,
41
- patch_size: Union[int, Tuple[int, int]] = 16,
42
- in_chans: int = 3,
43
- embed_dim: int = 768,
44
- norm_layer: Optional[Callable] = None,
45
- flatten_embedding: bool = True,
46
- ) -> None:
47
- super().__init__()
48
-
49
- image_HW = make_2tuple(img_size)
50
- patch_HW = make_2tuple(patch_size)
51
- patch_grid_size = (
52
- image_HW[0] // patch_HW[0],
53
- image_HW[1] // patch_HW[1],
54
- )
55
-
56
- self.img_size = image_HW
57
- self.patch_size = patch_HW
58
- self.patches_resolution = patch_grid_size
59
- self.num_patches = patch_grid_size[0] * patch_grid_size[1]
60
-
61
- self.in_chans = in_chans
62
- self.embed_dim = embed_dim
63
-
64
- self.flatten_embedding = flatten_embedding
65
-
66
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW)
67
- self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
68
-
69
- def forward(self, x: Tensor) -> Tensor:
70
- _, _, H, W = x.shape
71
- patch_H, patch_W = self.patch_size
72
-
73
- assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
74
- assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}"
75
-
76
- x = self.proj(x) # B C H W
77
- H, W = x.size(2), x.size(3)
78
- x = x.flatten(2).transpose(1, 2) # B HW C
79
- x = self.norm(x)
80
- if not self.flatten_embedding:
81
- x = x.reshape(-1, H, W, self.embed_dim) # B H W C
82
- return x
83
-
84
- def flops(self) -> float:
85
- Ho, Wo = self.patches_resolution
86
- flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
87
- if self.norm is not None:
88
- flops += Ho * Wo * self.embed_dim
89
- return flops
metric_depth/depth_anything_v2/dinov2_layers/swiglu_ffn.py DELETED
@@ -1,63 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from typing import Callable, Optional

from torch import Tensor, nn
import torch.nn.functional as F


class SwiGLUFFN(nn.Module):
    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., nn.Module] = None,
        drop: float = 0.0,
        bias: bool = True,
    ) -> None:
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
        self.w3 = nn.Linear(hidden_features, out_features, bias=bias)

    def forward(self, x: Tensor) -> Tensor:
        x12 = self.w12(x)
        x1, x2 = x12.chunk(2, dim=-1)
        hidden = F.silu(x1) * x2
        return self.w3(hidden)


try:
    from xformers.ops import SwiGLU

    XFORMERS_AVAILABLE = True
except ImportError:
    SwiGLU = SwiGLUFFN
    XFORMERS_AVAILABLE = False


class SwiGLUFFNFused(SwiGLU):
    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., nn.Module] = None,
        drop: float = 0.0,
        bias: bool = True,
    ) -> None:
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
        super().__init__(
            in_features=in_features,
            hidden_features=hidden_features,
            out_features=out_features,
            bias=bias,
        )
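The hidden-width arithmetic in `SwiGLUFFNFused` shrinks the requested size by roughly one third (SwiGLU uses two input projections, so this keeps the parameter count close to a plain MLP) and rounds up to a multiple of 8. A small numeric sketch, using the 4x MLP ratio of a 1024-dim block as an assumed input:

```python
# Reproduces the rounding in SwiGLUFFNFused.__init__ (the input value is an assumption).
hidden_features = 4 * 1024                                     # requested width: 4096
hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
print(hidden_features)                                         # 2736
```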
metric_depth/depth_anything_v2/dpt.py DELETED
@@ -1,222 +0,0 @@
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms import Compose

from .dinov2 import DINOv2
from .util.blocks import FeatureFusionBlock, _make_scratch
from .util.transform import Resize, NormalizeImage, PrepareForNet


def _make_fusion_block(features, use_bn, size=None):
    return FeatureFusionBlock(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
        size=size,
    )


class ConvBlock(nn.Module):
    def __init__(self, in_feature, out_feature):
        super().__init__()

        self.conv_block = nn.Sequential(
            nn.Conv2d(in_feature, out_feature, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_feature),
            nn.ReLU(True)
        )

    def forward(self, x):
        return self.conv_block(x)


class DPTHead(nn.Module):
    def __init__(
        self,
        in_channels,
        features=256,
        use_bn=False,
        out_channels=[256, 512, 1024, 1024],
        use_clstoken=False
    ):
        super(DPTHead, self).__init__()

        self.use_clstoken = use_clstoken

        self.projects = nn.ModuleList([
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channel,
                kernel_size=1,
                stride=1,
                padding=0,
            ) for out_channel in out_channels
        ])

        self.resize_layers = nn.ModuleList([
            nn.ConvTranspose2d(
                in_channels=out_channels[0],
                out_channels=out_channels[0],
                kernel_size=4,
                stride=4,
                padding=0),
            nn.ConvTranspose2d(
                in_channels=out_channels[1],
                out_channels=out_channels[1],
                kernel_size=2,
                stride=2,
                padding=0),
            nn.Identity(),
            nn.Conv2d(
                in_channels=out_channels[3],
                out_channels=out_channels[3],
                kernel_size=3,
                stride=2,
                padding=1)
        ])

        if use_clstoken:
            self.readout_projects = nn.ModuleList()
            for _ in range(len(self.projects)):
                self.readout_projects.append(
                    nn.Sequential(
                        nn.Linear(2 * in_channels, in_channels),
                        nn.GELU()))

        self.scratch = _make_scratch(
            out_channels,
            features,
            groups=1,
            expand=False,
        )

        self.scratch.stem_transpose = None

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        head_features_1 = features
        head_features_2 = 32

        self.scratch.output_conv1 = nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1)
        self.scratch.output_conv2 = nn.Sequential(
            nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, out_features, patch_h, patch_w):
        out = []
        for i, x in enumerate(out_features):
            if self.use_clstoken:
                x, cls_token = x[0], x[1]
                readout = cls_token.unsqueeze(1).expand_as(x)
                x = self.readout_projects[i](torch.cat((x, readout), -1))
            else:
                x = x[0]

            x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))

            x = self.projects[i](x)
            x = self.resize_layers[i](x)

            out.append(x)

        layer_1, layer_2, layer_3, layer_4 = out

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv1(path_1)
        out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
        out = self.scratch.output_conv2(out)

        return out


class DepthAnythingV2(nn.Module):
    def __init__(
        self,
        encoder='vitl',
        features=256,
        out_channels=[256, 512, 1024, 1024],
        use_bn=False,
        use_clstoken=False,
        max_depth=20.0
    ):
        super(DepthAnythingV2, self).__init__()

        self.intermediate_layer_idx = {
            'vits': [2, 5, 8, 11],
            'vitb': [2, 5, 8, 11],
            'vitl': [4, 11, 17, 23],
            'vitg': [9, 19, 29, 39]
        }

        self.max_depth = max_depth

        self.encoder = encoder
        self.pretrained = DINOv2(model_name=encoder)

        self.depth_head = DPTHead(self.pretrained.embed_dim, features, use_bn, out_channels=out_channels, use_clstoken=use_clstoken)

    def forward(self, x):
        patch_h, patch_w = x.shape[-2] // 14, x.shape[-1] // 14

        features = self.pretrained.get_intermediate_layers(x, self.intermediate_layer_idx[self.encoder], return_class_token=True)

        depth = self.depth_head(features, patch_h, patch_w) * self.max_depth

        return depth.squeeze(1)

    @torch.no_grad()
    def infer_image(self, raw_image, input_size=518):
        image, (h, w) = self.image2tensor(raw_image, input_size)

        depth = self.forward(image)

        depth = F.interpolate(depth[:, None], (h, w), mode="bilinear", align_corners=True)[0, 0]

        return depth.cpu().numpy()

    def image2tensor(self, raw_image, input_size=518):
        transform = Compose([
            Resize(
                width=input_size,
                height=input_size,
                resize_target=False,
                keep_aspect_ratio=True,
                ensure_multiple_of=14,
                resize_method='lower_bound',
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            PrepareForNet(),
        ])

        h, w = raw_image.shape[:2]

        image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB) / 255.0

        image = transform({'image': image})['image']
        image = torch.from_numpy(image).unsqueeze(0)

        DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
        image = image.to(DEVICE)

        return image, (h, w)
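A minimal programmatic inference sketch built from the pieces above; the checkpoint path and ViT-L configuration are taken from the README and the configs hard-coded in `run.py` below, and `example.jpg` is a placeholder filename:

```python
import cv2
import torch

from depth_anything_v2.dpt import DepthAnythingV2

DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

model = DepthAnythingV2(encoder='vitl', features=256,
                        out_channels=[256, 512, 1024, 1024], max_depth=20.0)
model.load_state_dict(torch.load('checkpoints/depth_anything_v2_metric_hypersim_vitl.pth',
                                 map_location='cpu'))
model = model.to(DEVICE).eval()

raw = cv2.imread('example.jpg')                  # BGR uint8 image, as infer_image expects
depth = model.infer_image(raw, input_size=518)   # (H, W) float32 array of metric depth in meters
```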
metric_depth/depth_anything_v2/util/blocks.py DELETED
@@ -1,148 +0,0 @@
import torch.nn as nn


def _make_scratch(in_shape, out_shape, groups=1, expand=False):
    scratch = nn.Module()

    out_shape1 = out_shape
    out_shape2 = out_shape
    out_shape3 = out_shape
    if len(in_shape) >= 4:
        out_shape4 = out_shape

    if expand:
        out_shape1 = out_shape
        out_shape2 = out_shape * 2
        out_shape3 = out_shape * 4
        if len(in_shape) >= 4:
            out_shape4 = out_shape * 8

    scratch.layer1_rn = nn.Conv2d(in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
    scratch.layer2_rn = nn.Conv2d(in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
    scratch.layer3_rn = nn.Conv2d(in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)
    if len(in_shape) >= 4:
        scratch.layer4_rn = nn.Conv2d(in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups)

    return scratch


class ResidualConvUnit(nn.Module):
    """Residual convolution module.
    """

    def __init__(self, features, activation, bn):
        """Init.

        Args:
            features (int): number of features
        """
        super().__init__()

        self.bn = bn

        self.groups = 1

        self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)

        self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)

        if self.bn == True:
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)

        self.activation = activation

        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: output
        """

        out = self.activation(x)
        out = self.conv1(out)
        if self.bn == True:
            out = self.bn1(out)

        out = self.activation(out)
        out = self.conv2(out)
        if self.bn == True:
            out = self.bn2(out)

        if self.groups > 1:
            out = self.conv_merge(out)

        return self.skip_add.add(out, x)


class FeatureFusionBlock(nn.Module):
    """Feature fusion block.
    """

    def __init__(
        self,
        features,
        activation,
        deconv=False,
        bn=False,
        expand=False,
        align_corners=True,
        size=None
    ):
        """Init.

        Args:
            features (int): number of features
        """
        super(FeatureFusionBlock, self).__init__()

        self.deconv = deconv
        self.align_corners = align_corners

        self.groups = 1

        self.expand = expand
        out_features = features
        if self.expand == True:
            out_features = features // 2

        self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)

        self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit(features, activation, bn)

        self.skip_add = nn.quantized.FloatFunctional()

        self.size = size

    def forward(self, *xs, size=None):
        """Forward pass.

        Returns:
            tensor: output
        """
        output = xs[0]

        if len(xs) == 2:
            res = self.resConfUnit1(xs[1])
            output = self.skip_add.add(output, res)

        output = self.resConfUnit2(output)

        if (size is None) and (self.size is None):
            modifier = {"scale_factor": 2}
        elif size is None:
            modifier = {"size": self.size}
        else:
            modifier = {"size": size}

        output = nn.functional.interpolate(output, **modifier, mode="bilinear", align_corners=self.align_corners)

        output = self.out_conv(output)

        return output
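In `DPTHead` above, each `FeatureFusionBlock` receives the previous (already upsampled) decoder feature plus one lateral feature from `scratch.layerN_rn`, adds them, refines, and interpolates to the next stage's resolution. A shape-only sketch, assuming the definitions above are in scope and with illustrative tensor sizes:

```python
import torch
import torch.nn as nn

# Shape-only sketch; sizes are illustrative, not taken from a real checkpoint.
scratch = _make_scratch([256, 512, 1024, 1024], 256, groups=1, expand=False)
lateral = scratch.layer2_rn(torch.randn(1, 512, 32, 32))   # -> (1, 256, 32, 32)

fuse = FeatureFusionBlock(256, nn.ReLU(False), align_corners=True)
deep = torch.randn(1, 256, 32, 32)        # previous fusion output, already resized to match
out = fuse(deep, lateral, size=(64, 64))  # fused, refined, then upsampled to the next stage
print(out.shape)                          # torch.Size([1, 256, 64, 64])
```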
metric_depth/depth_anything_v2/util/transform.py DELETED
@@ -1,158 +0,0 @@
import numpy as np
import cv2


class Resize(object):
    """Resize sample to given size (width, height).
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as least as possbile
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(f"resize_method {self.__resize_method} not implemented")

        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(scale_height * height, min_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width, min_val=self.__width)
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(scale_height * height, max_val=self.__height)
            new_width = self.constrain_to_multiple_of(scale_width * width, max_val=self.__width)
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        width, height = self.get_size(sample["image"].shape[1], sample["image"].shape[0])

        # resize sample
        sample["image"] = cv2.resize(sample["image"], (width, height), interpolation=self.__image_interpolation_method)

        if self.__resize_target:
            if "depth" in sample:
                sample["depth"] = cv2.resize(sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST)

            if "mask" in sample:
                sample["mask"] = cv2.resize(sample["mask"].astype(np.float32), (width, height), interpolation=cv2.INTER_NEAREST)

        return sample


class NormalizeImage(object):
    """Normalize image by given mean and std.
    """

    def __init__(self, mean, std):
        self.__mean = mean
        self.__std = std

    def __call__(self, sample):
        sample["image"] = (sample["image"] - self.__mean) / self.__std

        return sample


class PrepareForNet(object):
    """Prepare sample for usage as network input.
    """

    def __init__(self):
        pass

    def __call__(self, sample):
        image = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(image).astype(np.float32)

        if "depth" in sample:
            depth = sample["depth"].astype(np.float32)
            sample["depth"] = np.ascontiguousarray(depth)

        if "mask" in sample:
            sample["mask"] = sample["mask"].astype(np.float32)
            sample["mask"] = np.ascontiguousarray(sample["mask"])

        return sample
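As a worked example of the resize policy used by `image2tensor` above ('lower_bound' with `keep_aspect_ratio=True` and `ensure_multiple_of=14`): a 1280x720 frame is scaled so its shorter side reaches 518 and both sides land on multiples of 14, giving 924x518. A minimal sketch, assuming `Resize` above is in scope:

```python
import cv2
import numpy as np

resize = Resize(width=518, height=518, resize_target=False, keep_aspect_ratio=True,
                ensure_multiple_of=14, resize_method='lower_bound',
                image_interpolation_method=cv2.INTER_CUBIC)

print(resize.get_size(1280, 720))                            # (924, 518)
sample = {'image': np.random.rand(720, 1280, 3).astype(np.float32)}
print(resize(sample)['image'].shape)                         # (518, 924, 3)
```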
metric_depth/depth_to_pointcloud.py DELETED
@@ -1,83 +0,0 @@
# Born out of Depth Anything V1 Issue 36
# Make sure you have the necessary libraries
# Code by @1ssb

import argparse
import cv2
import glob
import numpy as np
import open3d as o3d
import os
from PIL import Image
import torch

from depth_anything_v2.dpt import DepthAnythingV2


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--encoder', default='vitl', type=str, choices=['vits', 'vitb', 'vitl', 'vitg'])
    parser.add_argument('--load-from', default='', type=str)
    parser.add_argument('--max-depth', default=20, type=float)

    parser.add_argument('--img-path', type=str)
    parser.add_argument('--outdir', type=str, default='./vis_pointcloud')

    args = parser.parse_args()

    # Global settings
    FL = 715.0873
    FY = 784 * 0.6
    FX = 784 * 0.6
    NYU_DATA = False
    FINAL_HEIGHT = 518
    FINAL_WIDTH = 518

    DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

    model_configs = {
        'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
        'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
        'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
        'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
    }

    depth_anything = DepthAnythingV2(**{**model_configs[args.encoder], 'max_depth': args.max_depth})
    depth_anything.load_state_dict(torch.load(args.load_from, map_location='cpu'))
    depth_anything = depth_anything.to(DEVICE).eval()

    if os.path.isfile(args.img_path):
        if args.img_path.endswith('txt'):
            with open(args.img_path, 'r') as f:
                filenames = f.read().splitlines()
        else:
            filenames = [args.img_path]
    else:
        filenames = glob.glob(os.path.join(args.img_path, '**/*'), recursive=True)

    os.makedirs(args.outdir, exist_ok=True)

    for k, filename in enumerate(filenames):
        print(f'Progress {k+1}/{len(filenames)}: {filename}')

        color_image = Image.open(filename).convert('RGB')

        image = cv2.imread(filename)
        pred = depth_anything.infer_image(image, FINAL_HEIGHT)

        # Resize color image and depth to final size
        resized_color_image = color_image.resize((FINAL_WIDTH, FINAL_HEIGHT), Image.LANCZOS)
        resized_pred = Image.fromarray(pred).resize((FINAL_WIDTH, FINAL_HEIGHT), Image.NEAREST)

        focal_length_x, focal_length_y = (FX, FY) if not NYU_DATA else (FL, FL)
        x, y = np.meshgrid(np.arange(FINAL_WIDTH), np.arange(FINAL_HEIGHT))
        x = (x - FINAL_WIDTH / 2) / focal_length_x
        y = (y - FINAL_HEIGHT / 2) / focal_length_y
        z = np.array(resized_pred)
        points = np.stack((np.multiply(x, z), np.multiply(y, z), z), axis=-1).reshape(-1, 3)
        colors = np.array(resized_color_image).reshape(-1, 3) / 255.0

        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd.colors = o3d.utility.Vector3dVector(colors)
        o3d.io.write_point_cloud(os.path.join(args.outdir, os.path.splitext(os.path.basename(filename))[0] + ".ply"), pcd)
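The meshgrid lines above implement the standard pinhole back-projection X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy, with the principal point assumed at the image center and the hard-coded focal length 784 * 0.6 (or 715.0873 when NYU_DATA is set). A single-pixel check with assumed values:

```python
# Single-pixel back-projection matching the meshgrid math above (u, v, z are made up).
FX = FY = 784 * 0.6                  # 470.4, the script's hard-coded focal length
W = H = 518
u, v, z = 400, 100, 3.2              # pixel column, pixel row, predicted depth in meters

x = (u - W / 2) / FX * z             # X = (u - cx) * Z / fx
y = (v - H / 2) / FY * z             # Y = (v - cy) * Z / fy
print(round(x, 3), round(y, 3), z)   # 0.959 -1.082 3.2
```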
metric_depth/dist_train.sh DELETED
@@ -1,26 +0,0 @@
#!/bin/bash
now=$(date +"%Y%m%d_%H%M%S")

epoch=120
bs=4
gpus=8
lr=0.000005
encoder=vitl
dataset=hypersim # vkitti
img_size=518
min_depth=0.001
max_depth=20 # 80 for virtual kitti
pretrained_from=../checkpoints/depth_anything_v2_${encoder}.pth
save_path=exp/hypersim # exp/vkitti

mkdir -p $save_path

python3 -m torch.distributed.launch \
    --nproc_per_node=$gpus \
    --nnodes 1 \
    --node_rank=0 \
    --master_addr=localhost \
    --master_port=20596 \
    train.py --epoch $epoch --encoder $encoder --bs $bs --lr $lr --save-path $save_path --dataset $dataset \
    --img-size $img_size --min-depth $min_depth --max-depth $max_depth --pretrained-from $pretrained_from \
    --port 20596 2>&1 | tee -a $save_path/$now.log
metric_depth/requirements.txt DELETED
@@ -1,5 +0,0 @@
matplotlib
opencv-python
open3d
torch
torchvision
metric_depth/run.py DELETED
@@ -1,81 +0,0 @@
import argparse
import cv2
import glob
import matplotlib
import numpy as np
import os
import torch

from depth_anything_v2.dpt import DepthAnythingV2


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Depth Anything V2 Metric Depth Estimation')

    parser.add_argument('--img-path', type=str)
    parser.add_argument('--input-size', type=int, default=518)
    parser.add_argument('--outdir', type=str, default='./vis_depth')

    parser.add_argument('--encoder', type=str, default='vitl', choices=['vits', 'vitb', 'vitl', 'vitg'])
    parser.add_argument('--load-from', type=str, default='checkpoints/depth_anything_v2_metric_hypersim_vitl.pth')
    parser.add_argument('--max-depth', type=float, default=20)

    parser.add_argument('--save-numpy', dest='save_numpy', action='store_true', help='save the model raw output')
    parser.add_argument('--pred-only', dest='pred_only', action='store_true', help='only display the prediction')
    parser.add_argument('--grayscale', dest='grayscale', action='store_true', help='do not apply colorful palette')

    args = parser.parse_args()

    DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'

    model_configs = {
        'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
        'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
        'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
        'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
    }

    depth_anything = DepthAnythingV2(**{**model_configs[args.encoder], 'max_depth': args.max_depth})
    depth_anything.load_state_dict(torch.load(args.load_from, map_location='cpu'))
    depth_anything = depth_anything.to(DEVICE).eval()

    if os.path.isfile(args.img_path):
        if args.img_path.endswith('txt'):
            with open(args.img_path, 'r') as f:
                filenames = f.read().splitlines()
        else:
            filenames = [args.img_path]
    else:
        filenames = glob.glob(os.path.join(args.img_path, '**/*'), recursive=True)

    os.makedirs(args.outdir, exist_ok=True)

    cmap = matplotlib.colormaps.get_cmap('Spectral')

    for k, filename in enumerate(filenames):
        print(f'Progress {k+1}/{len(filenames)}: {filename}')

        raw_image = cv2.imread(filename)

        depth = depth_anything.infer_image(raw_image, args.input_size)

        if args.save_numpy:
            output_path = os.path.join(args.outdir, os.path.splitext(os.path.basename(filename))[0] + '_raw_depth_meter.npy')
            np.save(output_path, depth)

        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
        depth = depth.astype(np.uint8)

        if args.grayscale:
            depth = np.repeat(depth[..., np.newaxis], 3, axis=-1)
        else:
            depth = (cmap(depth)[:, :, :3] * 255)[:, :, ::-1].astype(np.uint8)

        output_path = os.path.join(args.outdir, os.path.splitext(os.path.basename(filename))[0] + '.png')
        if args.pred_only:
            cv2.imwrite(output_path, depth)
        else:
            split_region = np.ones((raw_image.shape[0], 50, 3), dtype=np.uint8) * 255
            combined_result = cv2.hconcat([raw_image, split_region, depth])

            cv2.imwrite(output_path, combined_result)
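Note that the PNG written above is min-max normalized per image, so only the `--save-numpy` output keeps metric values; a small sketch for reading one back (the filename is a placeholder):

```python
import numpy as np

depth = np.load('demo_raw_depth_meter.npy')   # (H, W) float array, meters
print(depth.shape, float(depth.min()), float(depth.max()), float(np.median(depth)))
```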
metric_depth/train.py DELETED
@@ -1,212 +0,0 @@
import argparse
import logging
import os
import pprint
import random

import warnings
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.optim import AdamW
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter

from dataset.hypersim import Hypersim
from dataset.kitti import KITTI
from dataset.vkitti2 import VKITTI2
from depth_anything_v2.dpt import DepthAnythingV2
from util.dist_helper import setup_distributed
from util.loss import SiLogLoss
from util.metric import eval_depth
from util.utils import init_log


parser = argparse.ArgumentParser(description='Depth Anything V2 for Metric Depth Estimation')

parser.add_argument('--encoder', default='vitl', choices=['vits', 'vitb', 'vitl', 'vitg'])
parser.add_argument('--dataset', default='hypersim', choices=['hypersim', 'vkitti'])
parser.add_argument('--img-size', default=518, type=int)
parser.add_argument('--min-depth', default=0.001, type=float)
parser.add_argument('--max-depth', default=20, type=float)
parser.add_argument('--epochs', default=40, type=int)
parser.add_argument('--bs', default=2, type=int)
parser.add_argument('--lr', default=0.000005, type=float)
parser.add_argument('--pretrained-from', type=str)
parser.add_argument('--save-path', type=str, required=True)
parser.add_argument('--local-rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)


def main():
    args = parser.parse_args()

    warnings.simplefilter('ignore', np.RankWarning)

    logger = init_log('global', logging.INFO)
    logger.propagate = 0

    rank, world_size = setup_distributed(port=args.port)

    if rank == 0:
        all_args = {**vars(args), 'ngpus': world_size}
        logger.info('{}\n'.format(pprint.pformat(all_args)))
        writer = SummaryWriter(args.save_path)

    cudnn.enabled = True
    cudnn.benchmark = True

    size = (args.img_size, args.img_size)
    if args.dataset == 'hypersim':
        trainset = Hypersim('dataset/splits/hypersim/train.txt', 'train', size=size)
    elif args.dataset == 'vkitti':
        trainset = VKITTI2('dataset/splits/vkitti2/train.txt', 'train', size=size)
    else:
        raise NotImplementedError
    trainsampler = torch.utils.data.distributed.DistributedSampler(trainset)
    trainloader = DataLoader(trainset, batch_size=args.bs, pin_memory=True, num_workers=4, drop_last=True, sampler=trainsampler)

    if args.dataset == 'hypersim':
        valset = Hypersim('dataset/splits/hypersim/val.txt', 'val', size=size)
    elif args.dataset == 'vkitti':
        valset = KITTI('dataset/splits/kitti/val.txt', 'val', size=size)
    else:
        raise NotImplementedError
    valsampler = torch.utils.data.distributed.DistributedSampler(valset)
    valloader = DataLoader(valset, batch_size=1, pin_memory=True, num_workers=4, drop_last=True, sampler=valsampler)

    local_rank = int(os.environ["LOCAL_RANK"])

    model_configs = {
        'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
        'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
        'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
        'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
    }
    model = DepthAnythingV2(**{**model_configs[args.encoder], 'max_depth': args.max_depth})

    if args.pretrained_from:
        model.load_state_dict({k: v for k, v in torch.load(args.pretrained_from, map_location='cpu').items() if 'pretrained' in k}, strict=False)

    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    model.cuda(local_rank)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False,
                                                      output_device=local_rank, find_unused_parameters=True)

    criterion = SiLogLoss().cuda(local_rank)

    optimizer = AdamW([{'params': [param for name, param in model.named_parameters() if 'pretrained' in name], 'lr': args.lr},
                       {'params': [param for name, param in model.named_parameters() if 'pretrained' not in name], 'lr': args.lr * 10.0}],
                      lr=args.lr, betas=(0.9, 0.999), weight_decay=0.01)

    total_iters = args.epochs * len(trainloader)

    previous_best = {'d1': 0, 'd2': 0, 'd3': 0, 'abs_rel': 100, 'sq_rel': 100, 'rmse': 100, 'rmse_log': 100, 'log10': 100, 'silog': 100}

    for epoch in range(args.epochs):
        if rank == 0:
            logger.info('===========> Epoch: {:}/{:}, d1: {:.3f}, d2: {:.3f}, d3: {:.3f}'.format(epoch, args.epochs, previous_best['d1'], previous_best['d2'], previous_best['d3']))
            logger.info('===========> Epoch: {:}/{:}, abs_rel: {:.3f}, sq_rel: {:.3f}, rmse: {:.3f}, rmse_log: {:.3f}, '
                        'log10: {:.3f}, silog: {:.3f}'.format(
                            epoch, args.epochs, previous_best['abs_rel'], previous_best['sq_rel'], previous_best['rmse'],
                            previous_best['rmse_log'], previous_best['log10'], previous_best['silog']))

        trainloader.sampler.set_epoch(epoch + 1)

        model.train()
        total_loss = 0

        for i, sample in enumerate(trainloader):
            optimizer.zero_grad()

            img, depth, valid_mask = sample['image'].cuda(), sample['depth'].cuda(), sample['valid_mask'].cuda()

            if random.random() < 0.5:
                img = img.flip(-1)
                depth = depth.flip(-1)
                valid_mask = valid_mask.flip(-1)

            pred = model(img)

            loss = criterion(pred, depth, (valid_mask == 1) & (depth >= args.min_depth) & (depth <= args.max_depth))

            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            iters = epoch * len(trainloader) + i

            lr = args.lr * (1 - iters / total_iters) ** 0.9

            optimizer.param_groups[0]["lr"] = lr
            optimizer.param_groups[1]["lr"] = lr * 10.0

            if rank == 0:
                writer.add_scalar('train/loss', loss.item(), iters)

            if rank == 0 and i % 100 == 0:
                logger.info('Iter: {}/{}, LR: {:.7f}, Loss: {:.3f}'.format(i, len(trainloader), optimizer.param_groups[0]['lr'], loss.item()))

        model.eval()

        results = {'d1': torch.tensor([0.0]).cuda(), 'd2': torch.tensor([0.0]).cuda(), 'd3': torch.tensor([0.0]).cuda(),
                   'abs_rel': torch.tensor([0.0]).cuda(), 'sq_rel': torch.tensor([0.0]).cuda(), 'rmse': torch.tensor([0.0]).cuda(),
                   'rmse_log': torch.tensor([0.0]).cuda(), 'log10': torch.tensor([0.0]).cuda(), 'silog': torch.tensor([0.0]).cuda()}
        nsamples = torch.tensor([0.0]).cuda()

        for i, sample in enumerate(valloader):

            img, depth, valid_mask = sample['image'].cuda().float(), sample['depth'].cuda()[0], sample['valid_mask'].cuda()[0]

            with torch.no_grad():
                pred = model(img)
                pred = F.interpolate(pred[:, None], depth.shape[-2:], mode='bilinear', align_corners=True)[0, 0]

            valid_mask = (valid_mask == 1) & (depth >= args.min_depth) & (depth <= args.max_depth)

            if valid_mask.sum() < 10:
                continue

            cur_results = eval_depth(pred[valid_mask], depth[valid_mask])

            for k in results.keys():
                results[k] += cur_results[k]
            nsamples += 1

        torch.distributed.barrier()

        for k in results.keys():
            dist.reduce(results[k], dst=0)
        dist.reduce(nsamples, dst=0)

        if rank == 0:
            logger.info('==========================================================================================')
            logger.info('{:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}, {:>8}'.format(*tuple(results.keys())))
            logger.info('{:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}, {:8.3f}'.format(*tuple([(v / nsamples).item() for v in results.values()])))
            logger.info('==========================================================================================')
            print()

            for name, metric in results.items():
                writer.add_scalar(f'eval/{name}', (metric / nsamples).item(), epoch)

        for k in results.keys():
            if k in ['d1', 'd2', 'd3']:
                previous_best[k] = max(previous_best[k], (results[k] / nsamples).item())
            else:
                previous_best[k] = min(previous_best[k], (results[k] / nsamples).item())

        if rank == 0:
            checkpoint = {
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
                'previous_best': previous_best,
            }
            torch.save(checkpoint, os.path.join(args.save_path, 'latest.pth'))


if __name__ == '__main__':
    main()
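The schedule inside the loop above is per-iteration polynomial decay with power 0.9, with all non-'pretrained' (decoder) parameters kept at 10x the encoder learning rate. A short sketch of the resulting curve under assumed iteration counts:

```python
# Polynomial LR decay as used in the training loop above (iteration counts are assumptions).
base_lr = 5e-6
total_iters = 100_000
for it in (0, 25_000, 50_000, 99_999):
    lr = base_lr * (1 - it / total_iters) ** 0.9
    print(f'iter {it:>6}: encoder {lr:.2e}, decoder {lr * 10:.2e}')
```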
metric_depth/util/dist_helper.py DELETED
@@ -1,41 +0,0 @@
import os
import subprocess

import torch
import torch.distributed as dist


def setup_distributed(backend="nccl", port=None):
    """AdaHessian Optimizer
    Lifted from https://github.com/BIGBALLON/distribuuuu/blob/master/distribuuuu/utils.py
    Originally licensed MIT, Copyright (c) 2020 Wei Li
    """
    num_gpus = torch.cuda.device_count()

    if "SLURM_JOB_ID" in os.environ:
        rank = int(os.environ["SLURM_PROCID"])
        world_size = int(os.environ["SLURM_NTASKS"])
        node_list = os.environ["SLURM_NODELIST"]
        addr = subprocess.getoutput(f"scontrol show hostname {node_list} | head -n1")
        # specify master port
        if port is not None:
            os.environ["MASTER_PORT"] = str(port)
        elif "MASTER_PORT" not in os.environ:
            os.environ["MASTER_PORT"] = "10685"
        if "MASTER_ADDR" not in os.environ:
            os.environ["MASTER_ADDR"] = addr
        os.environ["WORLD_SIZE"] = str(world_size)
        os.environ["LOCAL_RANK"] = str(rank % num_gpus)
        os.environ["RANK"] = str(rank)
    else:
        rank = int(os.environ["RANK"])
        world_size = int(os.environ["WORLD_SIZE"])

    torch.cuda.set_device(rank % num_gpus)

    dist.init_process_group(
        backend=backend,
        world_size=world_size,
        rank=rank,
    )
    return rank, world_size
metric_depth/util/loss.py DELETED
@@ -1,16 +0,0 @@
import torch
from torch import nn


class SiLogLoss(nn.Module):
    def __init__(self, lambd=0.5):
        super().__init__()
        self.lambd = lambd

    def forward(self, pred, target, valid_mask):
        valid_mask = valid_mask.detach()
        diff_log = torch.log(target[valid_mask]) - torch.log(pred[valid_mask])
        loss = torch.sqrt(torch.pow(diff_log, 2).mean() -
                          self.lambd * torch.pow(diff_log.mean(), 2))

        return loss
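`SiLogLoss` is the scale-invariant log loss sqrt(mean(d^2) - lambda * mean(d)^2) with d = log(target) - log(pred) and lambda = 0.5 by default, so a prediction that is wrong only by a global scale is penalized at half weight. A quick numeric check with made-up tensors, assuming the class above is in scope:

```python
import torch

target = torch.tensor([1.0, 2.0, 4.0])
pred = 2.0 * target                              # purely a global scale error
mask = torch.ones_like(target, dtype=torch.bool)
print(SiLogLoss()(pred, target, mask).item())    # sqrt(0.5) * log(2) ~= 0.4901
```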
metric_depth/util/metric.py DELETED
@@ -1,26 +0,0 @@
import torch


def eval_depth(pred, target):
    assert pred.shape == target.shape

    thresh = torch.max((target / pred), (pred / target))

    d1 = torch.sum(thresh < 1.25).float() / len(thresh)
    d2 = torch.sum(thresh < 1.25 ** 2).float() / len(thresh)
    d3 = torch.sum(thresh < 1.25 ** 3).float() / len(thresh)

    diff = pred - target
    diff_log = torch.log(pred) - torch.log(target)

    abs_rel = torch.mean(torch.abs(diff) / target)
    sq_rel = torch.mean(torch.pow(diff, 2) / target)

    rmse = torch.sqrt(torch.mean(torch.pow(diff, 2)))
    rmse_log = torch.sqrt(torch.mean(torch.pow(diff_log, 2)))

    log10 = torch.mean(torch.abs(torch.log10(pred) - torch.log10(target)))
    silog = torch.sqrt(torch.pow(diff_log, 2).mean() - 0.5 * torch.pow(diff_log.mean(), 2))

    return {'d1': d1.item(), 'd2': d2.item(), 'd3': d3.item(), 'abs_rel': abs_rel.item(), 'sq_rel': sq_rel.item(),
            'rmse': rmse.item(), 'rmse_log': rmse_log.item(), 'log10': log10.item(), 'silog': silog.item()}
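`eval_depth` expects already-masked 1-D tensors of valid pixels, as in the validation loop of `train.py` above. A small sketch with made-up values, assuming the function above is in scope:

```python
import torch

target = torch.tensor([1.0, 2.0, 3.0, 4.0])
pred = torch.tensor([1.1, 1.9, 3.3, 4.4])
metrics = eval_depth(pred, target)
print(metrics['d1'], metrics['abs_rel'])   # d1 = 1.0 (all ratios < 1.25), abs_rel ~= 0.0875
```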
metric_depth/util/utils.py DELETED
@@ -1,26 +0,0 @@
import os
import re
import numpy as np
import logging

logs = set()


def init_log(name, level=logging.INFO):
    if (name, level) in logs:
        return
    logs.add((name, level))
    logger = logging.getLogger(name)
    logger.setLevel(level)
    ch = logging.StreamHandler()
    ch.setLevel(level)
    if "SLURM_PROCID" in os.environ:
        rank = int(os.environ["SLURM_PROCID"])
        logger.addFilter(lambda record: rank == 0)
    else:
        rank = 0
    format_str = "[%(asctime)s][%(levelname)8s] %(message)s"
    formatter = logging.Formatter(format_str)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger