{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "1e99de7a", "metadata": {}, "outputs": [], "source": [ "#!git clone https://huggingface.co/spaces/depth-anything/Depth-Anything-V2\n", "#!pip install -r Depth-Anything-V2/requirements.txt\n", "#!pip install -q --upgrade coremltools\n", "#!cp ./patch_dinov2.diff Depth-Anything-V2/\n", "#!cd Depth-Anything-V2 && git apply patch_dinov2.diff\n", "#!cd .." ] }, { "cell_type": "code", "execution_count": 2, "id": "d6cb8a61", "metadata": {}, "outputs": [], "source": [ "import os\n", "os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'" ] }, { "cell_type": "code", "execution_count": 3, "id": "801db364", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "scikit-learn version 1.6.0 is not supported. Minimum required version: 0.17. Maximum required version: 1.5.1. Disabling scikit-learn conversion API.\n" ] } ], "source": [ "import torch\n", "import coremltools as ct\n", "import numpy as np\n", "from PIL import Image\n", "import tempfile\n", "from huggingface_hub import hf_hub_download\n", "import sys\n", "sys.path.append('./Depth-Anything-V2')\n", "\n" ] }, { "cell_type": "code", "execution_count": 4, "id": "73882c02", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "xFormers not available\n", "xFormers not available\n" ] } ], "source": [ "from depth_anything_v2.dpt import DepthAnythingV2\n", "from depth_anything_v2.util.transform import Resize, NormalizeImage, PrepareForNet\n", "\n", "import torch.nn.functional as F" ] }, { "cell_type": "markdown", "id": "26f7dcff", "metadata": {}, "source": [ "# 1. Load Depth-Anything-V2's vitl checkpoint" ] }, { "cell_type": "code", "execution_count": 5, "id": "e67aa722", "metadata": {}, "outputs": [], "source": [ "DEVICE = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'\n", "model_configs = {\n", " 'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},\n", " 'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},\n", " 'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},\n", " 'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}\n", "}\n", "encoder2name = {\n", " 'vits': 'Small',\n", " 'vitb': 'Base',\n", " 'vitl': 'Large',\n", " 'vitg': 'Giant', # we are undergoing company review procedures to release our giant model checkpoint\n", "}\n", "encoder = 'vits'\n", "model_name = encoder2name[encoder]\n", "model = DepthAnythingV2(**model_configs[encoder])\n", "filepath = hf_hub_download(repo_id=f\"depth-anything/Depth-Anything-V2-{model_name}\", filename=f\"depth_anything_v2_{encoder}.pth\", repo_type=\"model\")\n", "state_dict = torch.load(filepath, map_location=\"cpu\")\n", "model.load_state_dict(state_dict)\n", "model = model.to(DEVICE).eval()" ] }, { "cell_type": "code", "execution_count": 6, "id": "a632e6b4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(3024, 4032, 3)\n" ] } ], "source": [ "image = Image.open(\"./sample_images/IMG_4061.jpeg\")\n", "img = np.array(image)\n", "print(img.shape)\n", "h, w = img.shape[:2]\n", "depth = model.infer_image(img)\n", "depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0\n", "depth = depth.astype(np.uint8)\n", "depth_image = Image.fromarray(depth)\n", "depth_image.save(f\"depth_image_{model_name}_1.jpg\")" ] }, { "cell_type": "code", "execution_count": 7, "id": "77477217", 
"metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(3024, 4032, 3)\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2_layers/patch_embed.py:73: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", " assert H % patch_H == 0, f\"Input image height {H} is not a multiple of patch height {patch_H}\"\n", "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2_layers/patch_embed.py:74: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", " assert W % patch_W == 0, f\"Input image width {W} is not a multiple of patch width: {patch_W}\"\n", "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dinov2.py:183: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", " if npatch == N and w == h:\n", "/Users/dadler/Projects/Glide/ai-bots/depth/./Depth-Anything-V2/depth_anything_v2/dpt.py:147: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. 
This means that the trace might not generalize to other inputs!\n", " out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode=\"bilinear\", align_corners=True)\n" ] } ], "source": [ "original_image = Image.open(\"./sample_images/IMG_4061.jpeg\")\n", "original_img = np.array(original_image)\n", "print(original_img.shape)\n", "original_h, original_w = original_img.shape[:2]\n", "# Resize the image to the input size: width must be 518 and height must be divisible by 14\n", "input_size_w = 518\n", "#input_size_h = 392 #To have this work, you need to patch dinov2.py \n", "input_size_h = 518\n", "image = original_image.resize((input_size_w, input_size_h), Image.Resampling.BILINEAR)\n", "img = np.array(image)\n", "input_image, (h, w) = model.image2tensor(img, input_size_h)\n", "input_image = input_image.to(DEVICE)\n", "with torch.no_grad():\n", "    depth = model(input_image)\n", "    depth = F.interpolate(depth[:, None], (h, w), mode=\"bilinear\", align_corners=True)[0, 0]\n", "    depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0\n", "    depth = depth.cpu().numpy().astype(np.uint8)\n", "depth_image = Image.fromarray(depth).resize((original_w, original_h), Image.Resampling.BILINEAR)\n", "depth_image.save(f\"depth_image_{model_name}_2.jpg\")\n", "\n", "traced_model = torch.jit.trace(model, input_image)\n" ] },
{ "cell_type": "code", "execution_count": 8, "id": "42632870", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Traced PyTorch ImageEncoder ckpt out for jpg:\n", ">>> tensor([[0.0157, 0.0149, 0.0080, ..., 0.0410, 0.0407, 0.0510],\n", " [0.0043, 0.0084, 0.0000, ..., 0.0359, 0.0472, 0.0514],\n", " [0.0027, 0.0058, 0.0000, ..., 0.0333, 0.0354, 0.0526],\n", " ...,\n", " [0.0135, 0.0170, 0.0090, ..., 0.0534, 0.0506, 0.0532],\n", " [0.0157, 0.0203, 0.0122, ..., 0.0559, 0.0546, 0.0420],\n", " [0.0191, 0.0238, 0.0168, ..., 0.0588, 0.0576, 0.0648]],\n", " device='mps:0', grad_fn=<SliceBackward0>)\n" ] } ], "source": [ "example_output = traced_model(input_image)\n", "print(\"Traced PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", example_output[0, :10])" ] },
{ "cell_type": "markdown", "id": "3c0d9c70", "metadata": {}, "source": [ "Comparing the traced model's output with the original model's output shows a small loss of precision, but it is still acceptable." ] },
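{ "cell_type": "markdown", "id": "3c0d9c71", "metadata": {}, "source": [ "The next cell is an optional sanity check added for illustration (it is not required for the export): it runs the original `model` on the same `input_image` and prints the maximum absolute difference against the traced output, which quantifies the precision loss mentioned above." ] },
{ "cell_type": "code", "execution_count": null, "id": "3c0d9c72", "metadata": {}, "outputs": [], "source": [ "# Optional sanity check (added for illustration, not part of the original export flow):\n", "# measure how far the traced model drifts from the original model on the same input.\n", "with torch.no_grad():\n", "    reference_output = model(input_image)\n", "print(\"max abs difference vs. original model:\", (reference_output - example_output).abs().max().item())" ] },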
{ "cell_type": "markdown", "id": "ca182b4a", "metadata": {}, "source": [ "# 2. Export ImageEncoder" ] },
{ "cell_type": "code", "execution_count": 9, "id": "ef7af5c5", "metadata": {}, "outputs": [], "source": [ "image_means = [0.485, 0.456, 0.406]\n", "image_stds = [0.229, 0.224, 0.225]" ] },
{ "cell_type": "code", "execution_count": 10, "id": "8f66a99c", "metadata": {}, "outputs": [], "source": [ "import torchvision.transforms as transforms\n", "\n", "class Wrapper(torch.nn.Module):\n", "    def __init__(self, model):\n", "        super().__init__()\n", "        _means = image_means\n", "        _stds = image_stds\n", "        self.model = model\n", "        self.stds = torch.tensor(_stds).half()[:, None, None]\n", "        self.means = torch.tensor(_means).half()[:, None, None]\n", "\n", "        self.transform_model = torch.nn.Sequential(\n", "            transforms.Normalize(mean=image_means, std=image_stds)\n", "        )\n", "\n", "    def forward(self, input):\n", "        input = input / 255.0\n", "        input = self.transform_model(input)\n", "        output = self.model(input)\n", "        output = (output - output.min()) / (output.max() - output.min())\n", "        # Fix \"Image output, 'depthOutput', must have rank 4. Instead it has rank 3\"\n", "        output = output.unsqueeze(0)\n", "        # Fix \"Shape of the RGB/BGR image output, 'depthOutput', must be of kind (1, 3, H, W), i.e., first two dimensions must be (1, 3), instead they are: (1, 1)\"\n", "        output = output.repeat(1, 3, 1, 1)\n", "        output = output * 255.0\n", "        return output\n", "\n", "# Instantiate the Wrapper around the traced DepthAnythingV2 model\n", "wrapped_model = Wrapper(traced_model)" ] },
{ "cell_type": "code", "execution_count": 11, "id": "b3da3350", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "wrapped PyTorch ImageEncoder ckpt out for jpg:\n", ">>> tensor([[[ 1.0442, 1.0795, 1.0259, ..., 2.5866, 2.6540, 2.5864],\n", " [ 0.9688, 1.2331, 1.0579, ..., 2.8632, 2.9795, 2.7485],\n", " [ 0.9795, 1.2034, 0.9449, ..., 2.9342, 2.9196, 2.8207],\n", " ...,\n", " [100.1750, 100.6220, 100.7177, ..., 97.1819, 96.7440, 97.0862],\n", " [100.6218, 100.7040, 100.8275, ..., 97.2966, 97.6106, 97.7243],\n", " [ 99.4266, 100.6614, 100.1300, ..., 97.4383, 98.1441, 98.3714]],\n", "\n", " [[ 1.0442, 1.0795, 1.0259, ..., 2.5866, 2.6540, 2.5864],\n", " [ 0.9688, 1.2331, 1.0579, ..., 2.8632, 2.9795, 2.7485],\n", " [ 0.9795, 1.2034, 0.9449, ..., 2.9342, 2.9196, 2.8207],\n", " ...,\n", " [100.1750, 100.6220, 100.7177, ..., 97.1819, 96.7440, 97.0862],\n", " [100.6218, 100.7040, 100.8275, ..., 97.2966, 97.6106, 97.7243],\n", " [ 99.4266, 100.6614, 100.1300, ..., 97.4383, 98.1441, 98.3714]],\n", "\n", " [[ 1.0442, 1.0795, 1.0259, ..., 2.5866, 2.6540, 2.5864],\n", " [ 0.9688, 1.2331, 1.0579, ..., 2.8632, 2.9795, 2.7485],\n", " [ 0.9795, 1.2034, 0.9449, ..., 2.9342, 2.9196, 2.8207],\n", " ...,\n", " [100.1750, 100.6220, 100.7177, ..., 97.1819, 96.7440, 97.0862],\n", " [100.6218, 100.7040, 100.8275, ..., 97.2966, 97.6106, 97.7243],\n", " [ 99.4266, 100.6614, 100.1300, ..., 97.4383, 98.1441, 98.3714]]],\n", " device='mps:0')\n", "Traced wrapped PyTorch ImageEncoder ckpt out for jpg:\n", ">>> tensor([[[ 1.0442, 1.0795, 1.0259, ..., 2.5866, 2.6540, 2.5864],\n", " [ 0.9688, 1.2331, 1.0579, ..., 2.8632, 2.9795, 2.7485],\n", " [ 0.9795, 1.2034, 0.9449, ..., 2.9342, 2.9196, 2.8207],\n", " ...,\n", " [100.1750, 100.6220, 100.7177, ..., 97.1819, 96.7440, 97.0862],\n", " [100.6218, 100.7040, 100.8275, ..., 97.2966, 97.6106, 97.7243],\n", " [ 99.4266, 100.6614, 100.1300, ..., 97.4383, 98.1441, 98.3714]],\n", "\n", " [[ 1.0442, 1.0795, 1.0259, ..., 2.5866, 2.6540, 2.5864],\n", " [ 0.9688, 1.2331, 1.0579, ..., 2.8632, 2.9795, 2.7485],\n", " [ 0.9795, 1.2034, 0.9449, ..., 2.9342, 2.9196, 2.8207],\n", " ...,\n", " [100.1750, 100.6220, 100.7177, ..., 97.1819, 96.7440, 97.0862],\n", " [100.6218, 100.7040, 100.8275, ..., 97.2966, 97.6106, 97.7243],\n", " [ 99.4266, 100.6614, 100.1300, ..., 97.4383, 98.1441, 98.3714]],\n", "\n", " [[ 1.0442, 1.0795, 1.0259, ..., 2.5866, 2.6540, 2.5864],\n", " [ 0.9688, 1.2331, 1.0579, ..., 2.8632, 2.9795, 2.7485],\n", " [ 0.9795, 1.2034, 0.9449, ..., 2.9342, 2.9196, 2.8207],\n", " ...,\n", " [100.1750, 100.6220, 100.7177, ..., 97.1819, 96.7440, 97.0862],\n", " [100.6218, 100.7040, 100.8275, ..., 97.2966, 97.6106, 97.7243],\n", " [ 99.4266, 100.6614, 100.1300, ..., 97.4383, 98.1441, 98.3714]]],\n", " device='mps:0')\n" ] } ], "source": [ "i = np.asarray(original_image.resize((input_size_w, input_size_h)))\n", "i = i.astype(\"float32\")\n", "i = np.transpose(i, (2, 0, 1))\n", "i = np.expand_dims(i, 0)\n", "i = torch.from_numpy(i).to(DEVICE)\n", "\n", "with torch.no_grad():\n", "    out = wrapped_model(i)\n",
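"    # out is a (1, 3, 518, 518) tensor: the wrapper repeats the single depth channel three times and rescales it to the 0-255 range so it can be exported as an RGB image output\n",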
"\n", "print(\"wrapped PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", out[0, :10])\n", "\n", "traced_model_w = torch.jit.trace(wrapped_model, i)\n", "\n", "with torch.no_grad():\n", " out = traced_model_w(i)\n", "\n", "print(\"Traced wrapped PyTorch ImageEncoder ckpt out for jpg:\\n>>>\", out[0, :10])" ] }, { "cell_type": "code", "execution_count": 12, "id": "db5cb9b9", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "(torch.Size([1, 3, 518, 518]), torch.Size([1, 3, 518, 518]))" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "i.shape, out.shape" ] }, { "cell_type": "code", "execution_count": 13, "id": "681683aa", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "(1, 3, 518, 518) 255.0 0.0 101.90155\n", "(518, 518, 3) 255 0 101.40160403094767\n" ] } ], "source": [ "tmp = out.cpu().numpy()\n", "\n", "print(tmp.shape, tmp.max(), tmp.min(), tmp.mean())\n", "# Convert to 3, 256, 256\n", "tmp = np.transpose(tmp, (0, 2, 3, 1)).astype(np.uint8)\n", "tmp = tmp.squeeze()\n", "print(tmp.shape, tmp.max(), tmp.min(), tmp.mean())\n", "Image.fromarray(tmp)\n", "tmp_image = Image.fromarray(tmp).resize((original_w,original_h))\n", "tmp_image.save(f\"depth_image_{model_name}_3.png\")" ] }, { "cell_type": "code", "execution_count": 14, "id": "9e4f00bd", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "torch.Size([1, 3, 518, 518])" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "i.shape" ] }, { "cell_type": "code", "execution_count": 15, "id": "304ae7b0", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Converting PyTorch Frontend ==> MIL Ops: 100%|█████████▉| 779/780 [00:00<00:00, 7178.40 ops/s]\n", "Running MIL frontend_pytorch pipeline: 100%|██████████| 5/5 [00:00<00:00, 150.72 passes/s]\n", "Running MIL default pipeline: 100%|██████████| 89/89 [00:01<00:00, 64.35 passes/s] \n", "Running MIL backend_mlprogram pipeline: 100%|██████████| 12/12 [00:00<00:00, 165.76 passes/s]\n" ] } ], "source": [ "traced_model_w.eval()\n", "image_input = ct.ImageType(name=\"colorImage\", shape=i.shape)\n", "image_encoder_model = ct.converters.convert(\n", " traced_model_w,\n", " convert_to=\"mlprogram\",\n", " inputs=[image_input],\n", " outputs=[ct.ImageType(name=\"depthOutput\")],\n", " minimum_deployment_target=ct.target.iOS16,\n", ")\n", "image_encoder_model.save(f\"DepthAnything_v2_{model_name}_{input_size_w}x{input_size_h}_Box.mlpackage\")" ] } ], "metadata": { "kernelspec": { "display_name": "pytorch2", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.14" } }, "nbformat": 4, "nbformat_minor": 5 }