Norod78 committed on
Commit
6c17409
·
verified ·
1 Parent(s): 9002ba8

Depth Anything V2 - Large as MlPackage

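The archives added below are stored as Git LFS pointers, so the actual binaries have to be pulled from the Hub. A minimal sketch for fetching and unpacking the Core ML package with huggingface_hub; the repo_id and the extraction directory are placeholders and not taken from this commit:

```python
# Sketch: download the LFS-backed archive and unzip it locally.
# NOTE: repo_id is a placeholder for the repository that contains this commit.
import zipfile
from huggingface_hub import hf_hub_download

zip_path = hf_hub_download(
    repo_id="Norod78/DepthAnythingV2-Large-CoreML",  # placeholder, adjust to the actual repo
    filename="DepthAnything_v2-Large.mlpackage.zip",
)
with zipfile.ZipFile(zip_path) as zf:
    zf.extractall("DepthAnything_v2-Large")  # the .mlpackage ends up inside this directory
```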
DepthAnything_v2-Large.mlpackage.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4eecd277bf3394055bd9faa60960d6e279a1f7bed47ec5ad063f788680e65c91
3
+ size 618122036
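The pointer above records the package's sha256 and size, which can be used to check a downloaded copy. A short sketch, assuming the zip sits in the working directory:

```python
# Sketch: verify a local download against the sha256 from the LFS pointer above.
import hashlib

EXPECTED = "4eecd277bf3394055bd9faa60960d6e279a1f7bed47ec5ad063f788680e65c91"

sha = hashlib.sha256()
with open("DepthAnything_v2-Large.mlpackage.zip", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED, "checksum mismatch"
```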
DepthAnything_v2-Large_Mac.mlperf.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff701611f6b35cf21f928c7a9c83c8cc0a42daf7602b5be2735d489ebcd21095
3
+ size 75878
DepthAnything_v2-Large_iPhone16ProMax.mlperf.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba032b19dc9d98260aa878cad8962a01ecbf60d241900f01080cd4a6bb2acacb
3
+ size 76022
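The notebook below converts the PyTorch checkpoint to a Core ML program whose image input is named colorImage (518x518 RGB) and whose image output is named depthOutput. A minimal prediction sketch with coremltools (prediction runs on macOS only; the package path is assumed to be the unzipped archive):

```python
# Sketch: run the converted package with coremltools (macOS only).
import coremltools as ct
from PIL import Image

mlmodel = ct.models.MLModel("DepthAnything_v2-Large.mlpackage")  # assumed local path

img = Image.open("input.jpg").convert("RGB").resize((518, 518))  # model expects 518x518 RGB
pred = mlmodel.predict({"colorImage": img})
depth = pred["depthOutput"]   # returned as a PIL image (depth replicated across RGB channels)
depth.save("depth_prediction.png")
```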
PyTorch2CoreML-dpt.ipynb ADDED
@@ -0,0 +1,529 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "1e99de7a",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "--2024-06-20 13:18:56-- https://docs-assets.developer.apple.com/ml-research/datasets/mobileclip/mobileclip_s0.pt\n",
14
+ "Resolving docs-assets.developer.apple.com (docs-assets.developer.apple.com)... 17.253.73.203, 17.253.73.201\n",
15
+ "Connecting to docs-assets.developer.apple.com (docs-assets.developer.apple.com)|17.253.73.203|:443... connected.\n",
16
+ "HTTP request sent, awaiting response... 416 Requested Range Not Satisfiable\n",
17
+ "\n",
18
+ " The file is already fully retrieved; nothing to do.\n",
19
+ "\n",
20
+ "--2024-06-20 13:18:58-- https://raw.githubusercontent.com/apple/ml-mobileclip/main/mobileclip/configs/mobileclip_s0.json\n",
21
+ "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
22
+ "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
23
+ "HTTP request sent, awaiting response... 416 Range Not Satisfiable\n",
24
+ "\n",
25
+ " The file is already fully retrieved; nothing to do.\n",
26
+ "\n"
27
+ ]
28
+ }
29
+ ],
30
+ "source": [
31
+ "#!git clone https://huggingface.co/spaces/depth-anything/Depth-Anything-V2\n",
32
+ "#!pip install -r Depth-Anything-V2/requirements.txt\n",
33
+ "#!pip install -q --upgrade coremltools"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": 1,
39
+ "id": "d6cb8a61",
40
+ "metadata": {},
41
+ "outputs": [],
42
+ "source": [
43
+ "import os\n",
44
+ "os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'"
45
+ ]
46
+ },
47
+ {
48
+ "cell_type": "code",
49
+ "execution_count": 2,
50
+ "id": "801db364",
51
+ "metadata": {},
52
+ "outputs": [
53
+ {
54
+ "name": "stderr",
55
+ "output_type": "stream",
56
+ "text": [
57
+ "scikit-learn version 1.6.0 is not supported. Minimum required version: 0.17. Maximum required version: 1.5.1. Disabling scikit-learn conversion API.\n"
58
+ ]
59
+ }
60
+ ],
61
+ "source": [
62
+ "import torch\n",
63
+ "import coremltools as ct\n",
64
+ "import numpy as np\n",
65
+ "from PIL import Image\n",
66
+ "import tempfile\n",
67
+ "from huggingface_hub import hf_hub_download\n",
68
+ "import sys\n",
69
+ "sys.path.append('./Depth-Anything-V2')\n",
70
+ "\n"
71
+ ]
72
+ },
73
+ {
74
+ "cell_type": "code",
75
+ "execution_count": 15,
76
+ "id": "73882c02",
77
+ "metadata": {},
78
+ "outputs": [],
79
+ "source": [
80
+ "from depth_anything_v2.dpt import DepthAnythingV2\n",
81
+ "from depth_anything_v2.util.transform import Resize, NormalizeImage, PrepareForNet\n",
82
+ "\n",
83
+ "import torch.nn.functional as F"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "markdown",
88
+ "id": "26f7dcff",
89
+ "metadata": {},
90
+ "source": [
91
+ "# 1. Load Depth-Anything-V2's vitl checkpoint"
92
+ ]
93
+ },
94
+ {
95
+ "cell_type": "code",
96
+ "execution_count": 4,
97
+ "id": "e67aa722",
98
+ "metadata": {},
99
+ "outputs": [],
100
+ "source": [
101
+ "DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'\n",
102
+ "model_configs = {\n",
103
+ " 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},\n",
104
+ " 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},\n",
105
+ " 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},\n",
106
+ " 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}\n",
107
+ "}\n",
108
+ "encoder2name = {\n",
109
+ " 'vits': 'Small',\n",
110
+ " 'vitb': 'Base',\n",
111
+ " 'vitl': 'Large',\n",
112
+ " 'vitg': 'Giant', # we are undergoing company review procedures to release our giant model checkpoint\n",
113
+ "}\n",
114
+ "encoder = 'vitl'\n",
115
+ "model_name = encoder2name[encoder]\n",
116
+ "model = DepthAnythingV2(**model_configs[encoder])\n",
117
+ "filepath = hf_hub_download(repo_id=f\"depth-anything/Depth-Anything-V2-{model_name}\", filename=f\"depth_anything_v2_{encoder}.pth\", repo_type=\"model\")\n",
118
+ "state_dict = torch.load(filepath, map_location=\"cpu\")\n",
119
+ "model.load_state_dict(state_dict)\n",
120
+ "model = model.to(DEVICE).eval()"
121
+ ]
122
+ },
123
+ {
124
+ "cell_type": "code",
125
+ "execution_count": 8,
126
+ "id": "a632e6b4",
127
+ "metadata": {},
128
+ "outputs": [
129
+ {
130
+ "name": "stdout",
131
+ "output_type": "stream",
132
+ "text": [
133
+ "(3024, 4032, 3)\n"
134
+ ]
135
+ }
136
+ ],
137
+ "source": [
138
+ "image = Image.open(\"./sample_images/IMG_4061.jpeg\")\n",
139
+ "img = np.array(image)\n",
140
+ "print(img.shape)\n",
141
+ "h, w = img.shape[:2]\n",
142
+ "depth = model.infer_image(img)\n",
143
+ "depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0\n",
144
+ "depth = depth.astype(np.uint8)\n",
145
+ "depth_image = Image.fromarray(depth)\n",
146
+ "depth_image.save(\"depth_image.jpg\")"
147
+ ]
148
+ },
149
+ {
150
+ "cell_type": "code",
151
+ "execution_count": 36,
152
+ "id": "77477217",
153
+ "metadata": {},
154
+ "outputs": [
155
+ {
156
+ "name": "stdout",
157
+ "output_type": "stream",
158
+ "text": [
159
+ "(3024, 4032, 3)\n"
160
+ ]
161
+ },
162
+ {
163
+ "name": "stderr",
164
+ "output_type": "stream",
165
+ "text": [
166
+ "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2_layers/patch_embed.py:73: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
167
+ " assert H % patch_H == 0, f\"Input image height {H} is not a multiple of patch height {patch_H}\"\n",
168
+ "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2_layers/patch_embed.py:74: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
169
+ " assert W % patch_W == 0, f\"Input image width {W} is not a multiple of patch width: {patch_W}\"\n",
170
+ "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2.py:183: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
171
+ " if npatch == N and w == h:\n",
172
+ "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dpt.py:147: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
173
+ " out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode=\"bilinear\", align_corners=True)\n"
174
+ ]
175
+ }
176
+ ],
177
+ "source": [
178
+ "original_image = Image.open(\"./sample_images/IMG_4061.jpeg\")\n",
179
+ "original_img = np.array(original_image)\n",
180
+ "print(original_img.shape)\n",
181
+ "original_h, original_w = original_img.shape[:2]\n",
182
+ "input_size = 518\n",
183
+ "image = original_image.resize((input_size,input_size), Image.Resampling.BILINEAR)\n",
184
+ "img = np.array(image)\n",
185
+ "input_image, (h, w) = model.image2tensor(img, input_size)\n",
186
+ "input_image = input_image.to(DEVICE)\n",
187
+ "with torch.no_grad():\n",
188
+ " depth = model(input_image)\n",
189
+ " depth = F.interpolate(depth[:, None], (h, w), mode=\"bilinear\", align_corners=True)[0, 0]\n",
190
+ " depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0\n",
191
+ " depth = depth.cpu().numpy().astype(np.uint8)\n",
192
+ "depth_image = Image.fromarray(depth).resize((original_w,original_h), Image.Resampling.BILINEAR)\n",
193
+ "depth_image.save(\"depth_image_2.jpg\")\n",
194
+ "\n",
195
+ "traced_model = torch.jit.trace(model, input_image)\n"
196
+ ]
197
+ },
198
+ {
199
+ "cell_type": "code",
200
+ "execution_count": 37,
201
+ "id": "42632870",
202
+ "metadata": {},
203
+ "outputs": [
204
+ {
205
+ "name": "stdout",
206
+ "output_type": "stream",
207
+ "text": [
208
+ "Traced PyTorch ImageEncoder ckpt out for jpg:\n",
209
+ ">>> tensor([[3.8735, 3.9076, 4.0226, ..., 1.8554, 1.7260, 2.5633],\n",
210
+ " [4.3636, 4.1100, 4.1624, ..., 2.1774, 2.2929, 2.2913],\n",
211
+ " [4.3914, 4.2280, 4.2901, ..., 2.3076, 2.3133, 2.2698],\n",
212
+ " ...,\n",
213
+ " [5.8771, 5.8192, 5.8249, ..., 3.9578, 3.9079, 3.7710],\n",
214
+ " [6.1631, 6.1475, 6.1688, ..., 4.2481, 4.2320, 4.0410],\n",
215
+ " [6.4769, 6.4864, 6.4850, ..., 4.6766, 4.6218, 4.4442]],\n",
216
+ " device='mps:0', grad_fn=<SliceBackward0>)\n"
217
+ ]
218
+ }
219
+ ],
220
+ "source": [
221
+ "example_output = traced_model(input_image)\n",
222
+ "print(\"Traced PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", example_output[0, :10])"
223
+ ]
224
+ },
225
+ {
226
+ "cell_type": "markdown",
227
+ "id": "3c0d9c70",
228
+ "metadata": {},
229
+ "source": [
230
+ "You can see that there is some loss in precision, but it is still acceptable."
231
+ ]
232
+ },
233
+ {
234
+ "cell_type": "markdown",
235
+ "id": "ca182b4a",
236
+ "metadata": {},
237
+ "source": [
238
+ "# 2. Export ImageEncoder"
239
+ ]
240
+ },
241
+ {
242
+ "cell_type": "code",
243
+ "execution_count": 38,
244
+ "id": "ef7af5c5",
245
+ "metadata": {},
246
+ "outputs": [],
247
+ "source": [
248
+ "image_means = [0.485, 0.456, 0.406]\n",
249
+ "image_stds = [0.229, 0.224, 0.225]"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "code",
254
+ "execution_count": 73,
255
+ "id": "8f66a99c",
256
+ "metadata": {},
257
+ "outputs": [],
258
+ "source": [
259
+ "import torchvision.transforms as transforms\n",
260
+ "\n",
261
+ "class Wrapper(torch.nn.Module): \n",
262
+ " def __init__(self, model):\n",
263
+ " super().__init__()\n",
264
+ " _means = image_means\n",
265
+ " _stds = image_stds\n",
266
+ " self.model = model \n",
267
+ " self.stds = torch.tensor(_stds).half()[:,None,None]\n",
268
+ " self.means = torch.tensor(_means).half()[:,None,None]\n",
269
+ "\n",
270
+ " self.transform_model = torch.nn.Sequential(\n",
271
+ " transforms.Normalize(mean=image_means, std=image_stds)\n",
272
+ " )\n",
273
+ "\n",
274
+ " def forward(self, input): \n",
275
+ " input = input/255.0\n",
276
+ " input = self.transform_model(input)\n",
277
+ " output = self.model(input)\n",
278
+ " output = (output - output.min()) / (output.max() - output.min()) \n",
279
+ " # Fix \"Image output, 'depthOutput', must have rank 4. Instead it has rank 3\"\n",
280
+ " output = output.unsqueeze(0)\n",
281
+ " # Fix \"Shape of the RGB/BGR image output, 'depthOutput', must be of kind (1, 3, H, W), i.e., first two dimensions must be (1, 3), instead they are: (1, 1)\"\n",
282
+ " output = output.repeat(1, 3, 1, 1)\n",
283
+ " output = output * 255.0\n",
284
+ " return output\n",
285
+ "\n",
286
+ "# Instantiate the Wrapper model passing the original PyTorch FCN model\n",
287
+ "wrapped_model = Wrapper(traced_model)"
288
+ ]
289
+ },
290
+ {
291
+ "cell_type": "code",
292
+ "execution_count": 74,
293
+ "id": "b3da3350",
294
+ "metadata": {},
295
+ "outputs": [
296
+ {
297
+ "name": "stdout",
298
+ "output_type": "stream",
299
+ "text": [
300
+ "wrapped PyTorch ImageEncoder ckpt out for jpg:\n",
301
+ ">>> tensor([[[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n",
302
+ " 1.2884e-01, 4.5228e-01],\n",
303
+ " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n",
304
+ " 3.9270e-01, 3.3447e-01],\n",
305
+ " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n",
306
+ " 3.8963e-01, 4.5296e-01],\n",
307
+ " ...,\n",
308
+ " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n",
309
+ " 1.0194e+02, 1.0191e+02],\n",
310
+ " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n",
311
+ " 1.0219e+02, 1.0212e+02],\n",
312
+ " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n",
313
+ " 1.0220e+02, 1.0189e+02]],\n",
314
+ "\n",
315
+ " [[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n",
316
+ " 1.2884e-01, 4.5228e-01],\n",
317
+ " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n",
318
+ " 3.9270e-01, 3.3447e-01],\n",
319
+ " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n",
320
+ " 3.8963e-01, 4.5296e-01],\n",
321
+ " ...,\n",
322
+ " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n",
323
+ " 1.0194e+02, 1.0191e+02],\n",
324
+ " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n",
325
+ " 1.0219e+02, 1.0212e+02],\n",
326
+ " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n",
327
+ " 1.0220e+02, 1.0189e+02]],\n",
328
+ "\n",
329
+ " [[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n",
330
+ " 1.2884e-01, 4.5228e-01],\n",
331
+ " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n",
332
+ " 3.9270e-01, 3.3447e-01],\n",
333
+ " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n",
334
+ " 3.8963e-01, 4.5296e-01],\n",
335
+ " ...,\n",
336
+ " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n",
337
+ " 1.0194e+02, 1.0191e+02],\n",
338
+ " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n",
339
+ " 1.0219e+02, 1.0212e+02],\n",
340
+ " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n",
341
+ " 1.0220e+02, 1.0189e+02]]], device='mps:0')\n",
342
+ "Traced wrapped PyTorch ImageEncoder ckpt out for jpg:\n",
343
+ ">>> tensor([[[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n",
344
+ " 1.2884e-01, 4.5228e-01],\n",
345
+ " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n",
346
+ " 3.9270e-01, 3.3447e-01],\n",
347
+ " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n",
348
+ " 3.8963e-01, 4.5296e-01],\n",
349
+ " ...,\n",
350
+ " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n",
351
+ " 1.0194e+02, 1.0191e+02],\n",
352
+ " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n",
353
+ " 1.0219e+02, 1.0212e+02],\n",
354
+ " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n",
355
+ " 1.0220e+02, 1.0189e+02]],\n",
356
+ "\n",
357
+ " [[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n",
358
+ " 1.2884e-01, 4.5228e-01],\n",
359
+ " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n",
360
+ " 3.9270e-01, 3.3447e-01],\n",
361
+ " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n",
362
+ " 3.8963e-01, 4.5296e-01],\n",
363
+ " ...,\n",
364
+ " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n",
365
+ " 1.0194e+02, 1.0191e+02],\n",
366
+ " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n",
367
+ " 1.0219e+02, 1.0212e+02],\n",
368
+ " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n",
369
+ " 1.0220e+02, 1.0189e+02]],\n",
370
+ "\n",
371
+ " [[1.3479e+00, 1.3024e+00, 1.3246e+00, ..., 3.6170e-02,\n",
372
+ " 1.2884e-01, 4.5228e-01],\n",
373
+ " [1.5584e+00, 1.4481e+00, 1.4059e+00, ..., 3.4862e-01,\n",
374
+ " 3.9270e-01, 3.3447e-01],\n",
375
+ " [1.6099e+00, 1.5023e+00, 1.5238e+00, ..., 3.6392e-01,\n",
376
+ " 3.8963e-01, 4.5296e-01],\n",
377
+ " ...,\n",
378
+ " [1.0288e+02, 1.0318e+02, 1.0304e+02, ..., 1.0168e+02,\n",
379
+ " 1.0194e+02, 1.0191e+02],\n",
380
+ " [1.0353e+02, 1.0333e+02, 1.0334e+02, ..., 1.0216e+02,\n",
381
+ " 1.0219e+02, 1.0212e+02],\n",
382
+ " [1.0339e+02, 1.0290e+02, 1.0300e+02, ..., 1.0180e+02,\n",
383
+ " 1.0220e+02, 1.0189e+02]]], device='mps:0')\n"
384
+ ]
385
+ }
386
+ ],
387
+ "source": [
388
+ "i = np.asarray(original_image.resize((518, 518)))\n",
389
+ "i = i.astype(\"float32\")\n",
390
+ "i = np.transpose(i, (2, 0, 1))\n",
391
+ "i = np.expand_dims(i, 0)\n",
392
+ "i = torch.from_numpy(i).to(DEVICE)\n",
393
+ "\n",
394
+ "with torch.no_grad():\n",
395
+ " out = wrapped_model(i)\n",
396
+ "\n",
397
+ "print(\"wrapped PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", out[0, :10])\n",
398
+ "\n",
399
+ "traced_model_w = torch.jit.trace(wrapped_model, i)\n",
400
+ "\n",
401
+ "with torch.no_grad():\n",
402
+ " out = traced_model_w(i)\n",
403
+ "\n",
404
+ "print(\"Traced wrapped PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", out[0, :10])"
405
+ ]
406
+ },
407
+ {
408
+ "cell_type": "code",
409
+ "execution_count": 86,
410
+ "id": "db5cb9b9",
411
+ "metadata": {},
412
+ "outputs": [
413
+ {
414
+ "data": {
415
+ "text/plain": [
416
+ "(torch.Size([1, 3, 518, 518]), torch.Size([1, 3, 518, 518]))"
417
+ ]
418
+ },
419
+ "execution_count": 86,
420
+ "metadata": {},
421
+ "output_type": "execute_result"
422
+ }
423
+ ],
424
+ "source": [
425
+ "i.shape, out.shape"
426
+ ]
427
+ },
428
+ {
429
+ "cell_type": "code",
430
+ "execution_count": 92,
431
+ "id": "681683aa",
432
+ "metadata": {},
433
+ "outputs": [
434
+ {
435
+ "name": "stdout",
436
+ "output_type": "stream",
437
+ "text": [
438
+ "(1, 3, 518, 518) 255.0 0.0 104.07214\n",
439
+ "(518, 518, 3) 255 0 103.57204722648738\n"
440
+ ]
441
+ }
442
+ ],
443
+ "source": [
444
+ "tmp = out.cpu().numpy()\n",
445
+ "\n",
446
+ "print(tmp.shape, tmp.max(), tmp.min(), tmp.mean())\n",
447
+ "# Convert from (1, 3, 518, 518) to (518, 518, 3) uint8\n",
448
+ "tmp = np.transpose(tmp, (0, 2, 3, 1)).astype(np.uint8)\n",
449
+ "tmp = tmp.squeeze()\n",
450
+ "print(tmp.shape, tmp.max(), tmp.min(), tmp.mean())\n",
451
+ "Image.fromarray(tmp)\n",
452
+ "tmp_image = Image.fromarray(tmp).resize((original_w,original_h))\n",
453
+ "tmp_image.save(\"depth_image_3.png\")"
454
+ ]
455
+ },
456
+ {
457
+ "cell_type": "code",
458
+ "execution_count": 71,
459
+ "id": "9e4f00bd",
460
+ "metadata": {},
461
+ "outputs": [
462
+ {
463
+ "data": {
464
+ "text/plain": [
465
+ "torch.Size([1, 3, 518, 518])"
466
+ ]
467
+ },
468
+ "execution_count": 71,
469
+ "metadata": {},
470
+ "output_type": "execute_result"
471
+ }
472
+ ],
473
+ "source": [
474
+ "i.shape"
475
+ ]
476
+ },
477
+ {
478
+ "cell_type": "code",
479
+ "execution_count": null,
480
+ "id": "304ae7b0",
481
+ "metadata": {},
482
+ "outputs": [
483
+ {
484
+ "name": "stderr",
485
+ "output_type": "stream",
486
+ "text": [
487
+ "Converting PyTorch Frontend ==> MIL Ops: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 1247/1248 [00:00<00:00, 6927.17 ops/s]\n",
488
+ "Running MIL frontend_pytorch pipeline: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 5/5 [00:00<00:00, 90.46 passes/s]\n",
489
+ "Running MIL default pipeline: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 89/89 [00:06<00:00, 13.75 passes/s]\n",
490
+ "Running MIL backend_mlprogram pipeline: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 12/12 [00:00<00:00, 99.10 passes/s]\n"
491
+ ]
492
+ }
493
+ ],
494
+ "source": [
495
+ "traced_model_w.eval()\n",
496
+ "image_input = ct.ImageType(name=\"colorImage\", shape=i.shape)\n",
497
+ "image_encoder_model = ct.converters.convert(\n",
498
+ " traced_model_w,\n",
499
+ " convert_to=\"mlprogram\",\n",
500
+ " inputs=[image_input],\n",
501
+ " outputs=[ct.ImageType(name=\"depthOutput\")],\n",
502
+ " minimum_deployment_target=ct.target.iOS16,\n",
503
+ ")\n",
504
+ "image_encoder_model.save(\"DepthAnything_v2_large.mlpackage\")"
505
+ ]
506
+ }
507
+ ],
508
+ "metadata": {
509
+ "kernelspec": {
510
+ "display_name": "pytorch2",
511
+ "language": "python",
512
+ "name": "python3"
513
+ },
514
+ "language_info": {
515
+ "codemirror_mode": {
516
+ "name": "ipython",
517
+ "version": 3
518
+ },
519
+ "file_extension": ".py",
520
+ "mimetype": "text/x-python",
521
+ "name": "python",
522
+ "nbconvert_exporter": "python",
523
+ "pygments_lexer": "ipython3",
524
+ "version": "3.10.14"
525
+ }
526
+ },
527
+ "nbformat": 4,
528
+ "nbformat_minor": 5
529
+ }
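After the conversion cell above, a quick parity check between the Core ML model and the traced PyTorch wrapper is a useful sanity test. A rough sketch that reuses objects already defined in the notebook (image_encoder_model, traced_model_w, original_image, i); small differences are expected because the image output is rendered to 8-bit values and the Core ML program may run in float16:

```python
# Sketch: compare the Core ML prediction with the traced PyTorch wrapper (macOS only).
import numpy as np
import torch

image_518 = original_image.convert("RGB").resize((518, 518))
coreml_out = np.asarray(
    image_encoder_model.predict({"colorImage": image_518})["depthOutput"], dtype=np.float32
)  # (518, 518, 3)

with torch.no_grad():
    torch_out = traced_model_w(i)[0].permute(1, 2, 0).cpu().numpy()  # CHW -> HWC, 0..255 range

print("max abs diff: ", np.abs(coreml_out - torch_out).max())
print("mean abs diff:", np.abs(coreml_out - torch_out).mean())
```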