toshas committed
Commit 7222146 · Parent: 77e5011

deprecate marigold-lcm demo

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +0 -41
  2. .gitignore +0 -3
  3. README.md +45 -20
  4. app.py +0 -1109
  5. extrude.py +0 -400
  6. files/basrelief/coin.jpg +0 -3
  7. files/basrelief/einstein.jpg +0 -3
  8. files/basrelief/food.jpeg +0 -3
  9. files/image/arc.jpeg +0 -3
  10. files/image/bee.jpg +0 -3
  11. files/image/berries.jpeg +0 -3
  12. files/image/butterfly.jpeg +0 -3
  13. files/image/cat.jpg +0 -3
  14. files/image/concert.jpeg +0 -3
  15. files/image/dog.jpeg +0 -3
  16. files/image/doughnuts.jpeg +0 -3
  17. files/image/einstein.jpg +0 -3
  18. files/image/food.jpeg +0 -3
  19. files/image/glasses.jpeg +0 -3
  20. files/image/house.jpg +0 -3
  21. files/image/lake.jpeg +0 -3
  22. files/image/marigold.jpeg +0 -3
  23. files/image/portrait_1.jpeg +0 -3
  24. files/image/portrait_2.jpeg +0 -3
  25. files/image/pumpkins.jpg +0 -3
  26. files/image/puzzle.jpeg +0 -3
  27. files/image/road.jpg +0 -3
  28. files/image/scientists.jpg +0 -3
  29. files/image/surfboards.jpeg +0 -3
  30. files/image/surfer.jpeg +0 -3
  31. files/image/swings.jpg +0 -3
  32. files/image/switzerland.jpeg +0 -3
  33. files/image/teamwork.jpeg +0 -3
  34. files/image/wave.jpeg +0 -3
  35. files/video/cab.mp4 +0 -3
  36. files/video/elephant.mp4 +0 -3
  37. files/video/obama.mp4 +0 -3
  38. gradio_cached_examples/examples_bas/3D model outputs high-res/0f57994f5d6ac12c1020/food_depth_512.glb.zip +0 -3
  39. gradio_cached_examples/examples_bas/3D model outputs high-res/127d9bcaf03fa5f41dd3/food_depth_512.stl.zip +0 -3
  40. gradio_cached_examples/examples_bas/3D model outputs high-res/96a98e08d96fd47e5cc6/einstein_depth_512.obj.zip +0 -3
  41. gradio_cached_examples/examples_bas/3D model outputs high-res/a17995f3d4750a0e0bbc/food_depth_512.obj.zip +0 -3
  42. gradio_cached_examples/examples_bas/3D model outputs high-res/b0b93bdcbedf077307ba/coin_depth_512.stl.zip +0 -3
  43. gradio_cached_examples/examples_bas/3D model outputs high-res/c7499e9097e58b706e51/einstein_depth_512.glb.zip +0 -3
  44. gradio_cached_examples/examples_bas/3D model outputs high-res/ebe8a8d03fbc1a1fc2bd/coin_depth_512.glb.zip +0 -3
  45. gradio_cached_examples/examples_bas/3D model outputs high-res/ee9ee048f590c0c9a2c8/einstein_depth_512.stl.zip +0 -3
  46. gradio_cached_examples/examples_bas/3D model outputs high-res/fbaa26ffc2eb3654c177/coin_depth_512.obj.zip +0 -3
  47. gradio_cached_examples/examples_bas/3D preview low-res relief highlight/78ff2a583036eab8fe9b/coin_depth_256.glb +0 -3
  48. gradio_cached_examples/examples_bas/3D preview low-res relief highlight/8feb5fe1e8941c880c40/food_depth_256.glb +0 -3
  49. gradio_cached_examples/examples_bas/3D preview low-res relief highlight/bb26fd8a9d7890806329/einstein_depth_256.glb +0 -3
  50. gradio_cached_examples/examples_bas/log.csv +0 -4
.gitattributes DELETED
@@ -1,41 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- *.stl filter=lfs diff=lfs merge=lfs -text
- *.glb filter=lfs diff=lfs merge=lfs -text
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore DELETED
@@ -1,3 +0,0 @@
- .idea
- .DS_Store
- __pycache__
README.md CHANGED
@@ -1,28 +1,53 @@
  ---
- title: Marigold-LCM Depth Estimation
- emoji: 🏵️
+ title: Marigold-LCM Depth Estimation (Deprecated)
+ emoji: 🔴
  colorFrom: blue
  colorTo: red
- sdk: gradio
- sdk_version: 4.32.2
- app_file: app.py
- pinned: true
+ sdk: static
+ pinned: false
  license: cc-by-sa-4.0
  models:
  - prs-eth/marigold-depth-lcm-v1-0
- hf_oauth: true
- hf_oauth_expiration_minutes: 43200
  ---

- This is a demo of Marigold-LCM, the state-of-the-art depth estimator for images in the wild.
- It combines the power of the original Marigold 10-step estimator and the Latent Consistency Models, delivering high-quality results in as little as one step.
- Find out more in our CVPR 2024 Oral paper titled ["Repurposing Diffusion-Based Image Generators for Monocular Depth Estimation"](https://arxiv.org/abs/2312.02145)
-
- ```
- @InProceedings{ke2023repurposing,
-     title={Repurposing Diffusion-Based Image Generators for Monocular Depth Estimation},
-     author={Bingxin Ke and Anton Obukhov and Shengyu Huang and Nando Metzger and Rodrigo Caye Daudt and Konrad Schindler},
-     booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
-     year={2024}
- }
- ```
+ <h2 align="center">Marigold-LCM Depth Estimation</h2>
+ <p align="center"><span style="color: red;"><b>This demo is deprecated.</b></span></p>
+ <p align="center"><b>Fast and reliable</b> single-step estimation is now available in the original Marigold
+     <a title="Image Depth" href="https://huggingface.co/spaces/prs-eth/marigold" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Image%20Depth%20-Demo-yellow" style="vertical-align: middle;" alt="imagedepth">
+     </a>
+ </p>
+ <p align="center">Creation of <b>3D-printable bas-relief</b> models is now available in a separate
+     <a title="Depth-to-3D" href="https://huggingface.co/spaces/prs-eth/depth-to-3d-print" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Depth--to--3D%20-Demo-yellow" style="vertical-align: middle;" alt="depthto3d">
+     </a>
+ </p>
+ <p align="center"><b>Video depth</b> processing is improved and available in a separate
+     <a title="Video Depth" href="https://huggingface.co/spaces/prs-eth/rollingdepth" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Video%20Depth%20-Demo-yellow" style="vertical-align: middle;" alt="videodepth">
+     </a>
+ </p>
+ <p align="center"><span style="color: blue;"><b>Check out other Marigold resources:</b></span></p>
+ <p align="center">
+     <a title="Website" href="https://marigoldmonodepth.github.io/" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/badge/%E2%99%A5%20Project%20-Website-blue">
+     </a>
+     <a title="arXiv" href="https://arxiv.org/abs/2312.02145" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/badge/%F0%9F%93%84%20Read%20-Paper-AF3436">
+     </a>
+     <a title="Github" href="https://github.com/prs-eth/marigold" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/github/stars/prs-eth/marigold?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars">
+     </a>
+     <a title="Image Normals" href="https://huggingface.co/spaces/prs-eth/marigold-normals" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Image%20Normals%20-Demo-yellow" alt="imagenormals">
+     </a>
+     <a title="Image Intrinsics" href="https://huggingface.co/spaces/prs-eth/marigold-iid" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Image%20Intrinsics%20-Demo-yellow" alt="imageintrinsics">
+     </a>
+     <a title="LiDAR Depth" href="https://huggingface.co/spaces/prs-eth/marigold-dc" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://img.shields.io/badge/%F0%9F%A4%97%20LiDAR%20Depth%20-Demo-yellow" alt="lidardepth">
+     </a>
+     <a title="Social" href="https://twitter.com/antonobukhov1" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+         <img src="https://shields.io/twitter/follow/:?label=Subscribe%20for%20updates!" alt="social">
+     </a>
+ </p>
app.py DELETED
@@ -1,1109 +0,0 @@
- # Copyright 2024 Anton Obukhov, ETH Zurich. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- # --------------------------------------------------------------------------
- # If you find this code useful, we kindly ask you to cite our paper in your work.
- # Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
- # More information about the method can be found at https://marigoldmonodepth.github.io
- # --------------------------------------------------------------------------
- from __future__ import annotations
-
- import functools
- import os
- import tempfile
- import warnings
- import zipfile
- from io import BytesIO
-
- import diffusers
- import gradio as gr
- import imageio as imageio
- import numpy as np
- import spaces
- import torch as torch
- from PIL import Image
- from diffusers import MarigoldDepthPipeline
- from gradio_imageslider import ImageSlider
- from huggingface_hub import login
- from tqdm import tqdm
-
- from extrude import extrude_depth_3d
- from gradio_patches.examples import Examples
- from gradio_patches.flagging import FlagMethod, HuggingFaceDatasetSaver
-
- warnings.filterwarnings(
-     "ignore", message=".*LoginButton created outside of a Blocks context.*"
- )
-
- default_seed = 2024
- default_batch_size = 4
-
- default_image_num_inference_steps = 4
- default_image_ensemble_size = 1
- default_image_processing_resolution = 768
- default_image_reproducuble = True
-
- default_video_depth_latent_init_strength = 0.1
- default_video_num_inference_steps = 1
- default_video_ensemble_size = 1
- default_video_processing_resolution = 768
- default_video_out_max_frames = 450
-
- default_bas_plane_near = 0.0
- default_bas_plane_far = 1.0
- default_bas_embossing = 20
- default_bas_num_inference_steps = 4
- default_bas_ensemble_size = 1
- default_bas_processing_resolution = 768
- default_bas_size_longest_px = 512
- default_bas_size_longest_cm = 10
- default_bas_filter_size = 3
- default_bas_frame_thickness = 5
- default_bas_frame_near = 1
- default_bas_frame_far = 1
-
- default_share_always_show_hf_logout_btn = True
- default_share_always_show_accordion = False
-
-
- def process_image_check(path_input):
-     if path_input is None:
-         raise gr.Error(
-             "Missing image in the first pane: upload a file or use one from the gallery below."
-         )
-
-
- def process_image(
-     pipe,
-     path_input,
-     num_inference_steps=default_image_num_inference_steps,
-     ensemble_size=default_image_ensemble_size,
-     processing_resolution=default_image_processing_resolution,
- ):
-     name_base, name_ext = os.path.splitext(os.path.basename(path_input))
-     print(f"Processing image {name_base}{name_ext}")
-
-     path_output_dir = tempfile.mkdtemp()
-     path_out_fp32 = os.path.join(path_output_dir, f"{name_base}_depth_fp32.npy")
-     path_out_16bit = os.path.join(path_output_dir, f"{name_base}_depth_16bit.png")
-     path_out_vis = os.path.join(path_output_dir, f"{name_base}_depth_colored.png")
-
-     input_image = Image.open(path_input)
-
-     generator = torch.Generator(device=pipe.device).manual_seed(default_seed)
-
-     pipe_out = pipe(
-         input_image,
-         num_inference_steps=num_inference_steps,
-         ensemble_size=ensemble_size,
-         processing_resolution=processing_resolution,
-         batch_size=1 if processing_resolution == 0 else default_batch_size,
-         generator=generator,
-     )
-
-     depth_pred = pipe_out.prediction[0, :, :, 0]
-     depth_colored = pipe.image_processor.visualize_depth(pipe_out.prediction)[0]
-     depth_16bit = pipe.image_processor.export_depth_to_16bit_png(pipe_out.prediction)[0]
-
-     np.save(path_out_fp32, depth_pred)
-     depth_16bit.save(path_out_16bit)
-     depth_colored.save(path_out_vis)
-
-     return (
-         [path_out_16bit, path_out_vis],
-         [path_out_16bit, path_out_fp32, path_out_vis],
-     )
-
-
- def process_video(
-     pipe,
-     path_input,
-     depth_latent_init_strength=default_video_depth_latent_init_strength,
-     num_inference_steps=default_video_num_inference_steps,
-     ensemble_size=default_video_ensemble_size,
-     processing_resolution=default_video_processing_resolution,
-     out_max_frames=default_video_out_max_frames,
-     progress=gr.Progress(),
- ):
-     if path_input is None:
-         raise gr.Error(
-             "Missing video in the first pane: upload a file or use one from the gallery below."
-         )
-
-     name_base, name_ext = os.path.splitext(os.path.basename(path_input))
-     print(f"Processing video {name_base}{name_ext}")
-
-     path_output_dir = tempfile.mkdtemp()
-     path_out_vis = os.path.join(path_output_dir, f"{name_base}_depth_colored.mp4")
-     path_out_16bit = os.path.join(path_output_dir, f"{name_base}_depth_16bit.zip")
-
-     generator = torch.Generator(device=pipe.device).manual_seed(default_seed)
-
-     reader, writer, zipf = None, None, None
-     try:
-         pipe.vae, pipe.vae_tiny = pipe.vae_tiny, pipe.vae
-
-         reader = imageio.get_reader(path_input)
-
-         meta_data = reader.get_meta_data()
-         fps = meta_data["fps"]
-         size = meta_data["size"]
-         max_orig = max(size)
-         duration_sec = meta_data["duration"]
-         total_frames = int(fps * duration_sec)
-
-         out_duration_sec = out_max_frames / fps
-         if duration_sec > out_duration_sec:
-             gr.Warning(
-                 f"Only the first ~{int(out_duration_sec)} seconds will be processed; "
-                 f"use alternative setups such as ComfyUI Marigold node for full processing"
-             )
-
-         writer = imageio.get_writer(path_out_vis, fps=fps)
-
-         zipf = zipfile.ZipFile(path_out_16bit, "w", zipfile.ZIP_DEFLATED)
-
-         last_frame_latent = None
-         latent_common = torch.randn(
-             (
-                 1,
-                 4,
-                 (768 * size[1] + 7 * max_orig) // (8 * max_orig),
-                 (768 * size[0] + 7 * max_orig) // (8 * max_orig),
-             ),
-             generator=generator,
-             device=pipe.device,
-             dtype=torch.float16,
-         )
-
-         out_frame_id = 0
-         pbar = tqdm(desc="Processing Video", total=min(out_max_frames, total_frames))
-
-         for frame_id, frame in enumerate(reader):
-             out_frame_id += 1
-             pbar.update(1)
-             if out_frame_id > out_max_frames:
-                 break
-
-             frame_pil = Image.fromarray(frame)
-
-             latents = latent_common
-             if last_frame_latent is not None:
-                 assert (
-                     last_frame_latent.shape == latent_common.shape
-                 ), f"{last_frame_latent.shape}, {latent_common.shape}"
-                 latents = (
-                     1 - depth_latent_init_strength
-                 ) * latents + depth_latent_init_strength * last_frame_latent
-
-             pipe_out = pipe(
-                 frame_pil,
-                 num_inference_steps=num_inference_steps,
-                 ensemble_size=ensemble_size,
-                 processing_resolution=processing_resolution,
-                 match_input_resolution=False,
-                 batch_size=1,
-                 latents=latents,
-                 output_latent=True,
-             )
-
-             last_frame_latent = pipe_out.latent
-
-             processed_frame = pipe.image_processor.visualize_depth(  # noqa
-                 pipe_out.prediction
-             )[0]
-             processed_frame = imageio.core.util.Array(np.array(processed_frame))
-             writer.append_data(processed_frame)
-
-             archive_path = os.path.join(
-                 f"{name_base}_depth_16bit", f"{out_frame_id:05d}.png"
-             )
-             img_byte_arr = BytesIO()
-             processed_frame = pipe.image_processor.export_depth_to_16bit_png(
-                 pipe_out.prediction
-             )[0]
-             processed_frame.save(img_byte_arr, format="png")
-             img_byte_arr.seek(0)
-             zipf.writestr(archive_path, img_byte_arr.read())
-     finally:
-         if zipf is not None:
-             zipf.close()
-
-         if writer is not None:
-             writer.close()
-
-         if reader is not None:
-             reader.close()
-
-         pipe.vae, pipe.vae_tiny = pipe.vae_tiny, pipe.vae
-
-     return (
-         path_out_vis,
-         [path_out_vis, path_out_16bit],
-     )
-
-
- def process_bas(
-     pipe,
-     path_input,
-     plane_near=default_bas_plane_near,
-     plane_far=default_bas_plane_far,
-     embossing=default_bas_embossing,
-     num_inference_steps=default_bas_num_inference_steps,
-     ensemble_size=default_bas_ensemble_size,
-     processing_resolution=default_bas_processing_resolution,
-     size_longest_px=default_bas_size_longest_px,
-     size_longest_cm=default_bas_size_longest_cm,
-     filter_size=default_bas_filter_size,
-     frame_thickness=default_bas_frame_thickness,
-     frame_near=default_bas_frame_near,
-     frame_far=default_bas_frame_far,
- ):
-     if path_input is None:
-         raise gr.Error(
-             "Missing image in the first pane: upload a file or use one from the gallery below."
-         )
-
-     if plane_near >= plane_far:
-         raise gr.Error("NEAR plane must have a value smaller than the FAR plane")
-
-     name_base, name_ext = os.path.splitext(os.path.basename(path_input))
-     print(f"Processing bas-relief {name_base}{name_ext}")
-
-     path_output_dir = tempfile.mkdtemp()
-
-     input_image = Image.open(path_input)
-
-     generator = torch.Generator(device=pipe.device).manual_seed(default_seed)
-
-     pipe_out = pipe(
-         input_image,
-         num_inference_steps=num_inference_steps,
-         ensemble_size=ensemble_size,
-         processing_resolution=processing_resolution,
-         generator=generator,
-     )
-
-     depth_pred = pipe_out.prediction[0, :, :, 0] * 65535
-
-     def _process_3d(
-         size_longest_px,
-         filter_size,
-         vertex_colors,
-         scene_lights,
-         output_model_scale=None,
-         prepare_for_3d_printing=False,
-         zip_outputs=False,
-     ):
-         image_rgb_w, image_rgb_h = input_image.width, input_image.height
-         image_rgb_d = max(image_rgb_w, image_rgb_h)
-         image_new_w = size_longest_px * image_rgb_w // image_rgb_d
-         image_new_h = size_longest_px * image_rgb_h // image_rgb_d
-
-         image_rgb_new = os.path.join(
-             path_output_dir, f"{name_base}_rgb_{size_longest_px}{name_ext}"
-         )
-         image_depth_new = os.path.join(
-             path_output_dir, f"{name_base}_depth_{size_longest_px}.png"
-         )
-         input_image.resize((image_new_w, image_new_h), Image.LANCZOS).save(
-             image_rgb_new
-         )
-         Image.fromarray(depth_pred).convert(mode="F").resize(
-             (image_new_w, image_new_h), Image.BILINEAR
-         ).convert("I").save(image_depth_new)
-
-         path_glb, path_stl, path_obj = extrude_depth_3d(
-             image_rgb_new,
-             image_depth_new,
-             output_model_scale=(
-                 size_longest_cm * 10
-                 if output_model_scale is None
-                 else output_model_scale
-             ),
-             filter_size=filter_size,
-             coef_near=plane_near,
-             coef_far=plane_far,
-             emboss=embossing / 100,
-             f_thic=frame_thickness / 100,
-             f_near=frame_near / 100,
-             f_back=frame_far / 100,
-             vertex_colors=vertex_colors,
-             scene_lights=scene_lights,
-             prepare_for_3d_printing=prepare_for_3d_printing,
-             zip_outputs=zip_outputs,
-         )
-
-         return path_glb, path_stl, path_obj
-
-     path_viewer_glb, _, _ = _process_3d(
-         256, filter_size, vertex_colors=False, scene_lights=True, output_model_scale=1
-     )
-     path_files_glb, path_files_stl, path_files_obj = _process_3d(
-         size_longest_px,
-         filter_size,
-         vertex_colors=True,
-         scene_lights=False,
-         prepare_for_3d_printing=True,
-         zip_outputs=True,
-     )
-
-     return path_viewer_glb, [path_files_glb, path_files_stl, path_files_obj]
-
-
- def run_demo_server(pipe, hf_writer=None):
-     process_pipe_image = spaces.GPU(functools.partial(process_image, pipe))
-     process_pipe_video = spaces.GPU(
-         functools.partial(process_video, pipe), duration=120
-     )
-     process_pipe_bas = spaces.GPU(functools.partial(process_bas, pipe))
-
-     gradio_theme = gr.themes.Default()
-
-     with gr.Blocks(
-         theme=gradio_theme,
-         title="Marigold-LCM Depth Estimation",
-         css="""
-             #download {
-                 height: 118px;
-             }
-             .slider .inner {
-                 width: 5px;
-                 background: #FFF;
-             }
-             .viewport {
-                 aspect-ratio: 4/3;
-             }
-             .tabs button.selected {
-                 font-size: 20px !important;
-                 color: crimson !important;
-             }
-             h1 {
-                 text-align: center;
-                 display: block;
-             }
-             h2 {
-                 text-align: center;
-                 display: block;
-             }
-             h3 {
-                 text-align: center;
-                 display: block;
-             }
-             .md_feedback li {
-                 margin-bottom: 0px !important;
-             }
-         """,
-         head="""
-             <script async src="https://www.googletagmanager.com/gtag/js?id=G-1FWSVCGZTG"></script>
-             <script>
-                 window.dataLayer = window.dataLayer || [];
-                 function gtag() {dataLayer.push(arguments);}
-                 gtag('js', new Date());
-                 gtag('config', 'G-1FWSVCGZTG');
-             </script>
-         """,
-     ) as demo:
-         if hf_writer is not None:
-             print("Creating login button")
-             share_login_btn = gr.LoginButton(size="sm", scale=1, render=False)
-             print("Created login button")
-             share_login_btn.activate()
-             print("Activated login button")
-
-         gr.Markdown(
-             """
-             # Marigold-LCM Depth Estimation
-             <p align="center">
-             <a title="Website" href="https://marigoldmonodepth.github.io/" target="_blank" rel="noopener noreferrer"
-                style="display: inline-block;">
-                 <img src="https://www.obukhov.ai/img/badges/badge-website.svg">
-             </a>
-             <a title="arXiv" href="https://arxiv.org/abs/2312.02145" target="_blank" rel="noopener noreferrer"
-                style="display: inline-block;">
-                 <img src="https://www.obukhov.ai/img/badges/badge-pdf.svg">
-             </a>
-             <a title="Github" href="https://github.com/prs-eth/marigold" target="_blank" rel="noopener noreferrer"
-                style="display: inline-block;">
-                 <img src="https://img.shields.io/github/stars/prs-eth/marigold?label=GitHub%20%E2%98%85&logo=github&color=C8C"
-                      alt="badge-github-stars">
-             </a>
-             <a title="Social" href="https://twitter.com/antonobukhov1" target="_blank" rel="noopener noreferrer"
-                style="display: inline-block;">
-                 <img src="https://www.obukhov.ai/img/badges/badge-social.svg" alt="social">
-             </a>
-             </p>
-             <p align="justify">
-                 Marigold-LCM is the fast version of Marigold, the state-of-the-art depth estimator for images in the
-                 wild. It combines the power of the original Marigold 10-step estimator and the Latent Consistency
-                 Models, delivering high-quality results in as little as <b>one step</b>. We provide three functions
-                 in this demo: Image, Video, and Bas-relief 3D processing — <b>see the tabs below</b>. Upload your
-                 content into the <b>first</b> pane, or click any of the <b>examples</b> below. Wait a second (for
-                 images and 3D) or a minute (for videos), and interact with the result in the <b>second</b> pane. To
-                 avoid queuing, fork the demo into your profile.
-                 <a href="https://huggingface.co/spaces/prs-eth/marigold">
-                     The original Marigold demo is also available
-                 </a>.
-             </p>
-             """
-         )
-
-         def get_share_instructions(is_full):
-             out = (
-                 "### Help us improve Marigold! If the output is not what you expected, "
-                 "you can help us by sharing it with us privately.\n"
-             )
-             if is_full:
-                 out += (
-                     "1. Sign into your Hugging Face account using the button below.\n"
-                     "1. Signing in may reset the demo and results; in that case, process the image again.\n"
-                 )
-             out += "1. Review and agree to the terms of usage and enter an optional message to us.\n"
-             out += "1. Click the 'Share' button to submit the image to us privately.\n"
-             return out
-
-         def get_share_conditioned_on_login(profile: gr.OAuthProfile | None):
-             state_logged_out = profile is None
-             return get_share_instructions(is_full=state_logged_out), gr.Button(
-                 visible=(state_logged_out or default_share_always_show_hf_logout_btn)
-             )
-
-         with gr.Tabs(elem_classes=["tabs"]):
-             with gr.Tab("Image"):
-                 with gr.Row():
-                     with gr.Column():
-                         image_input = gr.Image(
-                             label="Input Image",
-                             type="filepath",
-                         )
-                         with gr.Row():
-                             image_submit_btn = gr.Button(
-                                 value="Compute Depth", variant="primary"
-                             )
-                             image_reset_btn = gr.Button(value="Reset")
-                         with gr.Accordion("Advanced options", open=False):
-                             image_num_inference_steps = gr.Slider(
-                                 label="Number of denoising steps",
-                                 minimum=1,
-                                 maximum=4,
-                                 step=1,
-                                 value=default_image_num_inference_steps,
-                             )
-                             image_ensemble_size = gr.Slider(
-                                 label="Ensemble size",
-                                 minimum=1,
-                                 maximum=10,
-                                 step=1,
-                                 value=default_image_ensemble_size,
-                             )
-                             image_processing_resolution = gr.Radio(
-                                 [
-                                     ("Native", 0),
-                                     ("Recommended", 768),
-                                 ],
-                                 label="Processing resolution",
-                                 value=default_image_processing_resolution,
-                             )
-                     with gr.Column():
-                         image_output_slider = ImageSlider(
-                             label="Predicted depth (red-near, blue-far)",
-                             type="filepath",
-                             show_download_button=True,
-                             show_share_button=True,
-                             interactive=False,
-                             elem_classes="slider",
-                             position=0.25,
-                         )
-                         image_output_files = gr.Files(
-                             label="Depth outputs",
-                             elem_id="download",
-                             interactive=False,
-                         )
-
-                 if hf_writer is not None:
-                     with gr.Accordion(
-                         "Feedback",
-                         open=False,
-                         visible=default_share_always_show_accordion,
-                     ) as share_box:
-                         share_instructions = gr.Markdown(
-                             get_share_instructions(is_full=True),
-                             elem_classes="md_feedback",
-                         )
-                         share_transfer_of_rights = gr.Checkbox(
-                             label="(Optional) I own or hold necessary rights to the submitted image. By "
-                             "checking this box, I grant an irrevocable, non-exclusive, transferable, "
-                             "royalty-free, worldwide license to use the uploaded image, including for "
-                             "publishing, reproducing, and model training. [transfer_of_rights]",
-                             scale=1,
-                         )
-                         share_content_is_legal = gr.Checkbox(
-                             label="By checking this box, I acknowledge that my uploaded content is legal and "
-                             "safe, and that I am solely responsible for ensuring it complies with all "
-                             "applicable laws and regulations. Additionally, I am aware that my Hugging Face "
-                             "username is collected. [content_is_legal]",
-                             scale=1,
-                         )
-                         share_reason = gr.Textbox(
-                             label="(Optional) Reason for feedback",
-                             max_lines=1,
-                             interactive=True,
-                         )
-                         with gr.Row():
-                             share_login_btn.render()
-                             share_share_btn = gr.Button(
-                                 "Share", variant="stop", scale=1
-                             )
-
-                 Examples(
-                     fn=process_pipe_image,
-                     examples=[
-                         os.path.join("files", "image", name)
-                         for name in [
-                             "arc.jpeg",
-                             "berries.jpeg",
-                             "butterfly.jpeg",
-                             "cat.jpg",
-                             "concert.jpeg",
-                             "dog.jpeg",
-                             "doughnuts.jpeg",
-                             "einstein.jpg",
-                             "food.jpeg",
-                             "glasses.jpeg",
-                             "house.jpg",
-                             "lake.jpeg",
-                             "marigold.jpeg",
-                             "portrait_1.jpeg",
-                             "portrait_2.jpeg",
-                             "pumpkins.jpg",
-                             "puzzle.jpeg",
-                             "road.jpg",
-                             "scientists.jpg",
-                             "surfboards.jpeg",
-                             "surfer.jpeg",
-                             "swings.jpg",
-                             "switzerland.jpeg",
-                             "teamwork.jpeg",
-                             "wave.jpeg",
-                         ]
-                     ],
-                     inputs=[image_input],
-                     outputs=[image_output_slider, image_output_files],
-                     cache_examples=True,
-                     directory_name="examples_image",
-                 )
-
-             with gr.Tab("Video"):
-                 with gr.Row():
-                     with gr.Column():
-                         video_input = gr.Video(
-                             label="Input Video",
-                             sources=["upload"],
-                         )
-                         with gr.Row():
-                             video_submit_btn = gr.Button(
-                                 value="Compute Depth", variant="primary"
-                             )
-                             video_reset_btn = gr.Button(value="Reset")
-                     with gr.Column():
-                         video_output_video = gr.Video(
-                             label="Output video depth (red-near, blue-far)",
-                             interactive=False,
-                         )
-                         video_output_files = gr.Files(
-                             label="Depth outputs",
-                             elem_id="download",
-                             interactive=False,
-                         )
-                 Examples(
-                     fn=process_pipe_video,
-                     examples=[
-                         os.path.join("files", "video", name)
-                         for name in [
-                             "cab.mp4",
-                             "elephant.mp4",
-                             "obama.mp4",
-                         ]
-                     ],
-                     inputs=[video_input],
-                     outputs=[video_output_video, video_output_files],
-                     cache_examples=True,
-                     directory_name="examples_video",
-                 )
-
-             with gr.Tab("Bas-relief (3D)"):
-                 gr.Markdown(
-                     """
-                     <p align="justify">
-                         This part of the demo uses Marigold-LCM to create a bas-relief model.
-                         The models are watertight, with correct normals, and exported in the STL format, which makes
-                         them <b>3D-printable</b>.
-                     </p>
-                     """,
-                 )
-                 with gr.Row():
-                     with gr.Column():
-                         bas_input = gr.Image(
-                             label="Input Image",
-                             type="filepath",
-                         )
-                         with gr.Row():
-                             bas_submit_btn = gr.Button(
-                                 value="Create 3D", variant="primary"
-                             )
-                             bas_reset_btn = gr.Button(value="Reset")
-                         with gr.Accordion("3D printing demo: Main options", open=True):
-                             bas_plane_near = gr.Slider(
-                                 label="Relative position of the near plane (between 0 and 1)",
-                                 minimum=0.0,
-                                 maximum=1.0,
-                                 step=0.001,
-                                 value=default_bas_plane_near,
-                             )
-                             bas_plane_far = gr.Slider(
-                                 label="Relative position of the far plane (between near and 1)",
-                                 minimum=0.0,
-                                 maximum=1.0,
-                                 step=0.001,
-                                 value=default_bas_plane_far,
-                             )
-                             bas_embossing = gr.Slider(
-                                 label="Embossing level",
-                                 minimum=0,
-                                 maximum=100,
-                                 step=1,
-                                 value=default_bas_embossing,
-                             )
-                         with gr.Accordion(
-                             "3D printing demo: Advanced options", open=False
-                         ):
-                             bas_num_inference_steps = gr.Slider(
-                                 label="Number of denoising steps",
-                                 minimum=1,
-                                 maximum=4,
-                                 step=1,
-                                 value=default_bas_num_inference_steps,
-                             )
-                             bas_ensemble_size = gr.Slider(
-                                 label="Ensemble size",
-                                 minimum=1,
-                                 maximum=10,
-                                 step=1,
-                                 value=default_bas_ensemble_size,
-                             )
-                             bas_processing_resolution = gr.Radio(
-                                 [
-                                     ("Native", 0),
-                                     ("Recommended", 768),
-                                 ],
-                                 label="Processing resolution",
-                                 value=default_bas_processing_resolution,
-                             )
-                             bas_size_longest_px = gr.Slider(
-                                 label="Size (px) of the longest side",
-                                 minimum=256,
-                                 maximum=1024,
-                                 step=256,
-                                 value=default_bas_size_longest_px,
-                             )
-                             bas_size_longest_cm = gr.Slider(
-                                 label="Size (cm) of the longest side",
-                                 minimum=1,
-                                 maximum=100,
-                                 step=1,
-                                 value=default_bas_size_longest_cm,
-                             )
-                             bas_filter_size = gr.Slider(
-                                 label="Size (px) of the smoothing filter",
-                                 minimum=1,
-                                 maximum=5,
-                                 step=2,
-                                 value=default_bas_filter_size,
-                             )
-                             bas_frame_thickness = gr.Slider(
-                                 label="Frame thickness",
-                                 minimum=0,
-                                 maximum=100,
-                                 step=1,
-                                 value=default_bas_frame_thickness,
-                             )
-                             bas_frame_near = gr.Slider(
-                                 label="Frame's near plane offset",
-                                 minimum=-100,
-                                 maximum=100,
-                                 step=1,
-                                 value=default_bas_frame_near,
-                             )
-                             bas_frame_far = gr.Slider(
-                                 label="Frame's far plane offset",
-                                 minimum=1,
-                                 maximum=10,
-                                 step=1,
-                                 value=default_bas_frame_far,
-                             )
-                     with gr.Column():
-                         bas_output_viewer = gr.Model3D(
-                             camera_position=(75.0, 90.0, 1.25),
-                             elem_classes="viewport",
-                             label="3D preview (low-res, relief highlight)",
-                             interactive=False,
-                         )
-                         bas_output_files = gr.Files(
-                             label="3D model outputs (high-res)",
-                             elem_id="download",
-                             interactive=False,
-                         )
-                 Examples(
-                     fn=process_pipe_bas,
-                     examples=[
-                         [
-                             "files/basrelief/coin.jpg",  # input
-                             0.0,  # plane_near
-                             0.66,  # plane_far
-                             15,  # embossing
-                             4,  # num_inference_steps
-                             4,  # ensemble_size
-                             768,  # processing_resolution
-                             512,  # size_longest_px
-                             10,  # size_longest_cm
-                             3,  # filter_size
-                             5,  # frame_thickness
-                             0,  # frame_near
-                             1,  # frame_far
-                         ],
-                         [
-                             "files/basrelief/einstein.jpg",  # input
-                             0.0,  # plane_near
-                             0.5,  # plane_far
-                             50,  # embossing
-                             2,  # num_inference_steps
-                             1,  # ensemble_size
-                             768,  # processing_resolution
-                             512,  # size_longest_px
-                             10,  # size_longest_cm
-                             3,  # filter_size
-                             5,  # frame_thickness
-                             -25,  # frame_near
-                             1,  # frame_far
-                         ],
-                         [
-                             "files/basrelief/food.jpeg",  # input
-                             0.0,  # plane_near
-                             1.0,  # plane_far
-                             20,  # embossing
-                             2,  # num_inference_steps
-                             4,  # ensemble_size
-                             768,  # processing_resolution
-                             512,  # size_longest_px
-                             10,  # size_longest_cm
-                             3,  # filter_size
-                             5,  # frame_thickness
-                             -5,  # frame_near
-                             1,  # frame_far
-                         ],
-                     ],
-                     inputs=[
-                         bas_input,
-                         bas_plane_near,
-                         bas_plane_far,
-                         bas_embossing,
-                         bas_num_inference_steps,
-                         bas_ensemble_size,
-                         bas_processing_resolution,
-                         bas_size_longest_px,
-                         bas_size_longest_cm,
-                         bas_filter_size,
-                         bas_frame_thickness,
-                         bas_frame_near,
-                         bas_frame_far,
-                     ],
-                     outputs=[bas_output_viewer, bas_output_files],
-                     cache_examples=True,
-                     directory_name="examples_bas",
-                 )
-
-         ### Image tab
-
-         if hf_writer is not None:
-             image_submit_btn.click(
-                 fn=process_image_check,
-                 inputs=image_input,
-                 outputs=None,
-                 preprocess=False,
-                 queue=False,
-             ).success(
-                 get_share_conditioned_on_login,
-                 None,
-                 [share_instructions, share_login_btn],
-                 queue=False,
-             ).then(
-                 lambda: (
-                     gr.Button(value="Share", interactive=True),
-                     gr.Accordion(visible=True),
-                     False,
-                     False,
-                     "",
-                 ),
-                 None,
-                 [
-                     share_share_btn,
-                     share_box,
-                     share_transfer_of_rights,
-                     share_content_is_legal,
-                     share_reason,
-                 ],
-                 queue=False,
-             ).then(
-                 fn=process_pipe_image,
-                 inputs=[
-                     image_input,
-                     image_num_inference_steps,
-                     image_ensemble_size,
-                     image_processing_resolution,
-                 ],
-                 outputs=[image_output_slider, image_output_files],
-                 concurrency_limit=1,
-             )
-         else:
-             image_submit_btn.click(
-                 fn=process_image_check,
-                 inputs=image_input,
-                 outputs=None,
-                 preprocess=False,
-                 queue=False,
-             ).success(
-                 fn=process_pipe_image,
-                 inputs=[
-                     image_input,
-                     image_num_inference_steps,
-                     image_ensemble_size,
-                     image_processing_resolution,
-                 ],
-                 outputs=[image_output_slider, image_output_files],
-                 concurrency_limit=1,
-             )
-
-         image_reset_btn.click(
-             fn=lambda: (
-                 None,
-                 None,
-                 None,
-                 default_image_ensemble_size,
-                 default_image_num_inference_steps,
-                 default_image_processing_resolution,
-             ),
-             inputs=[],
-             outputs=[
-                 image_input,
-                 image_output_slider,
-                 image_output_files,
-                 image_ensemble_size,
-                 image_num_inference_steps,
-                 image_processing_resolution,
-             ],
-             queue=False,
-         )
-
-         if hf_writer is not None:
-             image_reset_btn.click(
-                 fn=lambda: (
-                     gr.Button(value="Share", interactive=True),
-                     gr.Accordion(visible=default_share_always_show_accordion),
-                 ),
-                 inputs=[],
-                 outputs=[
-                     share_share_btn,
-                     share_box,
-                 ],
-                 queue=False,
-             )
-
-         ### Share functionality
-
-         if hf_writer is not None:
-             share_components = [
-                 image_input,
-                 image_num_inference_steps,
-                 image_ensemble_size,
-                 image_processing_resolution,
-                 image_output_slider,
-                 share_content_is_legal,
-                 share_transfer_of_rights,
-                 share_reason,
-             ]
-
-             hf_writer.setup(share_components, "shared_data")
-             share_callback = FlagMethod(hf_writer, "Share", "", visual_feedback=True)
-
-             def share_precheck(
-                 hf_content_is_legal,
-                 image_output_slider,
-                 profile: gr.OAuthProfile | None,
-             ):
-                 if profile is None:
-                     raise gr.Error(
-                         "Log into the Space with your Hugging Face account first."
-                     )
-                 if image_output_slider is None or image_output_slider[0] is None:
-                     raise gr.Error("No output detected; process the image first.")
-                 if not hf_content_is_legal:
-                     raise gr.Error(
-                         "You must consent that the uploaded content is legal."
-                     )
-                 return gr.Button(value="Sharing in progress", interactive=False)
-
-             share_share_btn.click(
-                 share_precheck,
-                 [share_content_is_legal, image_output_slider],
-                 share_share_btn,
-                 preprocess=False,
-                 queue=False,
-             ).success(
-                 share_callback,
-                 inputs=share_components,
-                 outputs=share_share_btn,
-                 preprocess=False,
-                 queue=False,
-             )
-
-         ### Video tab
-
-         video_submit_btn.click(
-             fn=process_pipe_video,
-             inputs=[video_input],
-             outputs=[video_output_video, video_output_files],
-             concurrency_limit=1,
-         )
-
-         video_reset_btn.click(
-             fn=lambda: (None, None, None),
-             inputs=[],
-             outputs=[video_input, video_output_video, video_output_files],
-             concurrency_limit=1,
-         )
-
-         ### Bas-relief tab
-
-         bas_submit_btn.click(
-             fn=process_pipe_bas,
-             inputs=[
-                 bas_input,
-                 bas_plane_near,
-                 bas_plane_far,
-                 bas_embossing,
-                 bas_num_inference_steps,
-                 bas_ensemble_size,
-                 bas_processing_resolution,
-                 bas_size_longest_px,
-                 bas_size_longest_cm,
-                 bas_filter_size,
-                 bas_frame_thickness,
-                 bas_frame_near,
-                 bas_frame_far,
-             ],
-             outputs=[bas_output_viewer, bas_output_files],
-             concurrency_limit=1,
-         )
-
-         bas_reset_btn.click(
-             fn=lambda: (
-                 gr.Button(interactive=True),
-                 None,
-                 None,
-                 None,
-                 default_bas_plane_near,
-                 default_bas_plane_far,
-                 default_bas_embossing,
-                 default_bas_num_inference_steps,
-                 default_bas_ensemble_size,
-                 default_bas_processing_resolution,
-                 default_bas_size_longest_px,
-                 default_bas_size_longest_cm,
-                 default_bas_filter_size,
-                 default_bas_frame_thickness,
-                 default_bas_frame_near,
-                 default_bas_frame_far,
-             ),
-             inputs=[],
-             outputs=[
-                 bas_submit_btn,
-                 bas_input,
-                 bas_output_viewer,
-                 bas_output_files,
-                 bas_plane_near,
-                 bas_plane_far,
-                 bas_embossing,
-                 bas_num_inference_steps,
-                 bas_ensemble_size,
-                 bas_processing_resolution,
-                 bas_size_longest_px,
-                 bas_size_longest_cm,
-                 bas_filter_size,
-                 bas_frame_thickness,
-                 bas_frame_near,
-                 bas_frame_far,
-             ],
-             concurrency_limit=1,
-         )
-
-     ### Server launch
-
-     demo.queue(
-         api_open=False,
-     ).launch(
-         server_name="0.0.0.0",
-         server_port=7860,
-     )
-
-
- def main():
-     CHECKPOINT = "prs-eth/marigold-depth-lcm-v1-0"
-     CROWD_DATA = "crowddata-marigold-depth-lcm-v1-0-space-v1-0"
-
-     os.system("pip freeze")
-
-     if "HF_TOKEN_LOGIN" in os.environ:
-         login(token=os.environ["HF_TOKEN_LOGIN"])
-
-     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-     pipe = MarigoldDepthPipeline.from_pretrained(
-         CHECKPOINT, variant="fp16", torch_dtype=torch.float16
-     ).to(device)
-     pipe.vae_tiny = diffusers.AutoencoderTiny.from_pretrained(
-         "madebyollin/taesd", torch_dtype=torch.float16
-     ).to(device)
-     pipe.set_progress_bar_config(disable=True)
-
-     try:
-         import xformers
-
-         pipe.enable_xformers_memory_efficient_attention()
-     except:
-         pass  # run without xformers
-
-     hf_writer = None
-     if "HF_TOKEN_LOGIN_WRITE_CROWD" in os.environ:
-         hf_writer = HuggingFaceDatasetSaver(
-             os.getenv("HF_TOKEN_LOGIN_WRITE_CROWD"),
-             CROWD_DATA,
-             private=True,
-             info_filename="dataset_info.json",
-             separate_dirs=True,
-         )
-
-     run_demo_server(pipe, hf_writer)
-
-
- if __name__ == "__main__":
-     main()
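Two implementation details in the deleted process_video above are worth flagging. First, the shared noise latent is sized at 1/8 of the processing-resolution frame: for a 1920×1080 input at processing resolution 768, (768·1080 + 7·1920) // (8·1920) = 54 and (768·1920 + 7·1920) // (8·1920) = 96, i.e. a 1×4×54×96 tensor. Second, frames are not denoised independently: each frame's initial latent is a blend of the shared noise and the previous frame's output latent, which damps temporal flicker. A self-contained sketch of that blend (the function name is ours, not from the repo):

```
from typing import Optional

import torch


def blend_init_latent(
    latent_common: torch.Tensor,
    last_frame_latent: Optional[torch.Tensor],
    strength: float = 0.1,  # default_video_depth_latent_init_strength above
) -> torch.Tensor:
    # strength=0.0 reproduces independent per-frame noise; larger values
    # trade per-frame detail for temporal stability.
    if last_frame_latent is None:
        return latent_common
    assert last_frame_latent.shape == latent_common.shape
    return (1 - strength) * latent_common + strength * last_frame_latent
```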
extrude.py DELETED
@@ -1,400 +0,0 @@
1
- # Copyright 2024 Anton Obukhov, ETH Zurich. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- # --------------------------------------------------------------------------
15
- # If you find this code useful, we kindly ask you to cite our paper in your work.
16
- # Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
17
- # More information about the method can be found at https://marigoldmonodepth.github.io
18
- # --------------------------------------------------------------------------
19
-
20
-
21
- import math
22
- import os
23
- import zipfile
24
-
25
- import numpy as np
26
- import pygltflib
27
- import trimesh
28
- from PIL import Image
29
- from scipy.ndimage import median_filter
30
-
31
-
32
- def quaternion_multiply(q1, q2):
33
- x1, y1, z1, w1 = q1
34
- x2, y2, z2, w2 = q2
35
- return [
36
- w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
37
- w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
38
- w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
39
- w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
40
- ]
41
-
42
-
43
- def glb_add_lights(path_input, path_output):
44
- """
45
- Adds directional lights in the horizontal plane to the glb file.
46
- :param path_input: path to input glb
47
- :param path_output: path to output glb
48
- :return: None
49
- """
50
- glb = pygltflib.GLTF2().load(path_input)
51
-
52
- N = 3 # default max num lights in Babylon.js is 4
53
- angle_step = 2 * math.pi / N
54
- elevation_angle = math.radians(75)
55
-
56
- light_colors = [
57
- [1.0, 0.0, 0.0],
58
- [0.0, 1.0, 0.0],
59
- [0.0, 0.0, 1.0],
60
- ]
61
-
62
- lights_extension = {
63
- "lights": [
64
- {"type": "directional", "color": light_colors[i], "intensity": 2.0}
65
- for i in range(N)
66
- ]
67
- }
68
-
69
- if "KHR_lights_punctual" not in glb.extensionsUsed:
70
- glb.extensionsUsed.append("KHR_lights_punctual")
71
- glb.extensions["KHR_lights_punctual"] = lights_extension
72
-
73
- light_nodes = []
74
- for i in range(N):
75
- angle = i * angle_step
76
-
77
- pos_rot = [0.0, 0.0, math.sin(angle / 2), math.cos(angle / 2)]
78
- elev_rot = [
79
- math.sin(elevation_angle / 2),
80
- 0.0,
81
- 0.0,
82
- math.cos(elevation_angle / 2),
83
- ]
84
- rotation = quaternion_multiply(pos_rot, elev_rot)
85
-
86
- node = {
87
- "rotation": rotation,
88
- "extensions": {"KHR_lights_punctual": {"light": i}},
89
- }
90
- light_nodes.append(node)
91
-
92
- light_node_indices = list(range(len(glb.nodes), len(glb.nodes) + N))
93
- glb.nodes.extend(light_nodes)
94
-
95
- root_node_index = glb.scenes[glb.scene].nodes[0]
96
- root_node = glb.nodes[root_node_index]
97
- if hasattr(root_node, "children"):
98
- root_node.children.extend(light_node_indices)
99
- else:
100
- root_node.children = light_node_indices
101
-
102
- glb.save(path_output)
103
-
104
-
105
- def extrude_depth_3d(
106
- path_rgb,
107
- path_depth,
108
- path_out_base=None,
109
- output_model_scale=100,
110
- filter_size=3,
111
- coef_near=0.0,
112
- coef_far=1.0,
113
- emboss=0.3,
114
- f_thic=0.05,
115
- f_near=-0.15,
116
- f_back=0.01,
117
- vertex_colors=True,
118
- scene_lights=True,
119
- prepare_for_3d_printing=False,
120
- zip_outputs=False,
121
- ):
122
- f_far_inner = -emboss
123
- f_far_outer = f_far_inner - f_back
124
-
125
- f_near = max(f_near, f_far_inner)
126
-
127
- depth_image = Image.open(path_depth)
128
-
129
- w, h = depth_image.size
130
- d_max = max(w, h)
131
- depth_image = np.array(depth_image).astype(np.double)
132
- depth_image = median_filter(depth_image, size=filter_size)
133
- z_min, z_max = np.min(depth_image), np.max(depth_image)
134
- depth_image = (depth_image.astype(np.double) - z_min) / (z_max - z_min)
135
- depth_image[depth_image < coef_near] = coef_near
136
- depth_image[depth_image > coef_far] = coef_far
137
- depth_image = emboss * (depth_image - coef_near) / (coef_far - coef_near)
138
- rgb_image = np.array(
139
- Image.open(path_rgb).convert("RGB").resize((w, h), Image.Resampling.LANCZOS)
140
- )
141
-
142
- w_norm = w / float(d_max - 1)
143
- h_norm = h / float(d_max - 1)
144
- w_half = w_norm / 2
145
- h_half = h_norm / 2
146
-
147
- x, y = np.meshgrid(np.arange(w), np.arange(h))
148
- x = x / float(d_max - 1) - w_half # [-w_half, w_half]
149
- y = -y / float(d_max - 1) + h_half # [-h_half, h_half]
150
- z = -depth_image # -depth_emboss (far) - 0 (near)
151
- vertices_2d = np.stack((x, y, z), axis=-1)
152
- vertices = vertices_2d.reshape(-1, 3)
153
- colors = rgb_image[:, :, :3].reshape(-1, 3) / 255.0
154
-
155
- faces = []
156
- for y in range(h - 1):
157
- for x in range(w - 1):
158
- idx = y * w + x
159
- faces.append([idx, idx + w, idx + 1])
160
- faces.append([idx + 1, idx + w, idx + 1 + w])
161
-
162
- # OUTER frame
163
-
164
- nv = len(vertices)
165
- vertices = np.append(
166
- vertices,
167
- [
168
- [-w_half - f_thic, -h_half - f_thic, f_near], # 00
169
- [-w_half - f_thic, -h_half - f_thic, f_far_outer], # 01
170
- [w_half + f_thic, -h_half - f_thic, f_near], # 02
171
- [w_half + f_thic, -h_half - f_thic, f_far_outer], # 03
172
- [w_half + f_thic, h_half + f_thic, f_near], # 04
173
- [w_half + f_thic, h_half + f_thic, f_far_outer], # 05
174
- [-w_half - f_thic, h_half + f_thic, f_near], # 06
175
- [-w_half - f_thic, h_half + f_thic, f_far_outer], # 07
176
- ],
177
- axis=0,
178
- )
179
- faces.extend(
180
- [
181
- [nv + 0, nv + 1, nv + 2],
182
- [nv + 2, nv + 1, nv + 3],
183
- [nv + 2, nv + 3, nv + 4],
184
- [nv + 4, nv + 3, nv + 5],
185
- [nv + 4, nv + 5, nv + 6],
186
- [nv + 6, nv + 5, nv + 7],
187
- [nv + 6, nv + 7, nv + 0],
188
- [nv + 0, nv + 7, nv + 1],
189
- ]
190
- )
191
- colors = np.append(colors, [[0.5, 0.5, 0.5]] * 8, axis=0)
192
-
193
- # INNER frame
194
-
195
- nv = len(vertices)
196
- vertices_left_data = vertices_2d[:, 0] # H x 3
197
- vertices_left_frame = vertices_2d[:, 0].copy() # H x 3
198
- vertices_left_frame[:, 2] = f_near
199
- vertices = np.append(vertices, vertices_left_data, axis=0)
200
- vertices = np.append(vertices, vertices_left_frame, axis=0)
201
- colors = np.append(colors, [[0.5, 0.5, 0.5]] * (2 * h), axis=0)
202
- for i in range(h - 1):
203
- nvi_d = nv + i
204
- nvi_f = nvi_d + h
205
- faces.append([nvi_d, nvi_f, nvi_d + 1])
206
- faces.append([nvi_d + 1, nvi_f, nvi_f + 1])
207
-
208
- nv = len(vertices)
209
- vertices_right_data = vertices_2d[:, -1] # H x 3
210
- vertices_right_frame = vertices_2d[:, -1].copy() # H x 3
211
- vertices_right_frame[:, 2] = f_near
212
- vertices = np.append(vertices, vertices_right_data, axis=0)
213
- vertices = np.append(vertices, vertices_right_frame, axis=0)
214
- colors = np.append(colors, [[0.5, 0.5, 0.5]] * (2 * h), axis=0)
215
- for i in range(h - 1):
216
- nvi_d = nv + i
217
- nvi_f = nvi_d + h
218
- faces.append([nvi_d, nvi_d + 1, nvi_f])
219
- faces.append([nvi_d + 1, nvi_f + 1, nvi_f])
220
-
221
- nv = len(vertices)
222
- vertices_top_data = vertices_2d[0, :] # H x 3
223
- vertices_top_frame = vertices_2d[0, :].copy() # H x 3
224
- vertices_top_frame[:, 2] = f_near
225
- vertices = np.append(vertices, vertices_top_data, axis=0)
226
- vertices = np.append(vertices, vertices_top_frame, axis=0)
227
- colors = np.append(colors, [[0.5, 0.5, 0.5]] * (2 * w), axis=0)
228
- for i in range(w - 1):
229
- nvi_d = nv + i
-        nvi_f = nvi_d + w
-        faces.append([nvi_d, nvi_d + 1, nvi_f])
-        faces.append([nvi_d + 1, nvi_f + 1, nvi_f])
-
-    nv = len(vertices)
-    vertices_bottom_data = vertices_2d[-1, :]  # W x 3
-    vertices_bottom_frame = vertices_2d[-1, :].copy()  # W x 3
-    vertices_bottom_frame[:, 2] = f_near
-    vertices = np.append(vertices, vertices_bottom_data, axis=0)
-    vertices = np.append(vertices, vertices_bottom_frame, axis=0)
-    colors = np.append(colors, [[0.5, 0.5, 0.5]] * (2 * w), axis=0)
-    for i in range(w - 1):
-        nvi_d = nv + i
-        nvi_f = nvi_d + w
-        faces.append([nvi_d, nvi_f, nvi_d + 1])
-        faces.append([nvi_d + 1, nvi_f, nvi_f + 1])
-
-    # FRONT frame
-
-    nv = len(vertices)
-    vertices = np.append(
-        vertices,
-        [
-            [-w_half - f_thic, -h_half - f_thic, f_near],
-            [-w_half - f_thic, h_half + f_thic, f_near],
-        ],
-        axis=0,
-    )
-    vertices = np.append(vertices, vertices_left_frame, axis=0)
-    colors = np.append(colors, [[0.5, 0.5, 0.5]] * (2 + h), axis=0)
-    for i in range(h - 1):
-        faces.append([nv, nv + 2 + i + 1, nv + 2 + i])
-    faces.append([nv, nv + 2, nv + 1])
-
-    nv = len(vertices)
-    vertices = np.append(
-        vertices,
-        [
-            [w_half + f_thic, h_half + f_thic, f_near],
-            [w_half + f_thic, -h_half - f_thic, f_near],
-        ],
-        axis=0,
-    )
-    vertices = np.append(vertices, vertices_right_frame, axis=0)
-    colors = np.append(colors, [[0.5, 0.5, 0.5]] * (2 + h), axis=0)
-    for i in range(h - 1):
-        faces.append([nv, nv + 2 + i, nv + 2 + i + 1])
-    faces.append([nv, nv + h + 1, nv + 1])
-
-    nv = len(vertices)
-    vertices = np.append(
-        vertices,
-        [
-            [w_half + f_thic, h_half + f_thic, f_near],
-            [-w_half - f_thic, h_half + f_thic, f_near],
-        ],
-        axis=0,
-    )
-    vertices = np.append(vertices, vertices_top_frame, axis=0)
-    colors = np.append(colors, [[0.5, 0.5, 0.5]] * (2 + w), axis=0)
-    for i in range(w - 1):
-        faces.append([nv, nv + 2 + i, nv + 2 + i + 1])
-    faces.append([nv, nv + 1, nv + 2])
-
-    nv = len(vertices)
-    vertices = np.append(
-        vertices,
-        [
-            [-w_half - f_thic, -h_half - f_thic, f_near],
-            [w_half + f_thic, -h_half - f_thic, f_near],
-        ],
-        axis=0,
-    )
-    vertices = np.append(vertices, vertices_bottom_frame, axis=0)
-    colors = np.append(colors, [[0.5, 0.5, 0.5]] * (2 + w), axis=0)
-    for i in range(w - 1):
-        faces.append([nv, nv + 2 + i + 1, nv + 2 + i])
-    faces.append([nv, nv + 1, nv + w + 1])
-
-    # BACK frame
-
-    nv = len(vertices)
-    vertices = np.append(
-        vertices,
-        [
-            [-w_half - f_thic, -h_half - f_thic, f_far_outer],  # 00
-            [w_half + f_thic, -h_half - f_thic, f_far_outer],  # 01
-            [w_half + f_thic, h_half + f_thic, f_far_outer],  # 02
-            [-w_half - f_thic, h_half + f_thic, f_far_outer],  # 03
-        ],
-        axis=0,
-    )
-    faces.extend(
-        [
-            [nv + 0, nv + 2, nv + 1],
-            [nv + 2, nv + 0, nv + 3],
-        ]
-    )
-    colors = np.append(colors, [[0.5, 0.5, 0.5]] * 4, axis=0)
-
-    trimesh_kwargs = {}
-    if vertex_colors:
-        trimesh_kwargs["vertex_colors"] = colors
-    mesh = trimesh.Trimesh(vertices=vertices, faces=faces, **trimesh_kwargs)
-
-    mesh.merge_vertices()
-
-    current_max_dimension = max(mesh.extents)
-    scaling_factor = output_model_scale / current_max_dimension
-    mesh.apply_scale(scaling_factor)
-
-    if prepare_for_3d_printing:
-        rotation_mat = trimesh.transformations.rotation_matrix(
-            np.radians(90), [-1, 0, 0]
-        )
-        mesh.apply_transform(rotation_mat)
-
-    if path_out_base is None:
-        path_out_base = os.path.splitext(path_depth)[0].replace("_16bit", "")
-    path_out_glb = path_out_base + ".glb"
-    path_out_stl = path_out_base + ".stl"
-    path_out_obj = path_out_base + ".obj"
-
-    mesh.export(path_out_glb, file_type="glb")
-    if scene_lights:
-        glb_add_lights(path_out_glb, path_out_glb)
-    mesh.export(path_out_stl, file_type="stl")
-    mesh.export(path_out_obj, file_type="obj")
-
-    if zip_outputs:
-        with zipfile.ZipFile(path_out_glb + ".zip", "w", zipfile.ZIP_DEFLATED) as zipf:
-            arcname = os.path.basename(os.path.splitext(path_out_glb)[0]) + ".glb"
-            zipf.write(path_out_glb, arcname=arcname)
-        path_out_glb = path_out_glb + ".zip"
-        with zipfile.ZipFile(path_out_stl + ".zip", "w", zipfile.ZIP_DEFLATED) as zipf:
-            arcname = os.path.basename(os.path.splitext(path_out_stl)[0]) + ".stl"
-            zipf.write(path_out_stl, arcname=arcname)
-        path_out_stl = path_out_stl + ".zip"
-        with zipfile.ZipFile(path_out_obj + ".zip", "w", zipfile.ZIP_DEFLATED) as zipf:
-            arcname = os.path.basename(os.path.splitext(path_out_obj)[0]) + ".obj"
-            zipf.write(path_out_obj, arcname=arcname)
-        path_out_obj = path_out_obj + ".zip"
-
-    return path_out_glb, path_out_stl, path_out_obj
-
-
-if __name__ == "__main__":
-    img_rgb = "files/basrelief/einstein.jpg"
-    img_depth = "gradio_cached_examples/examples_image/Depth outputs/54d74157894322bdc77c/einstein_depth_16bit.png"
-    Image.open(img_rgb).resize((512, 512), Image.LANCZOS).save(
-        "einstein_3d_tex_512.jpg"
-    )
-    Image.open(img_depth).convert(mode="F").resize((512, 512), Image.BILINEAR).convert(
-        "I"
-    ).save("einstein_3d_depth_512.png")
-    extrude_depth_3d(
-        "einstein_3d_tex_512.jpg",
-        "einstein_3d_depth_512.png",
-        path_out_base="einstein_3d_out",
-        output_model_scale=100,
-        filter_size=3,
-        coef_near=0.0,
-        coef_far=0.5,
-        emboss=0.5,
-        f_thic=0.05,
-        f_near=-0.25,
-        f_back=0.01,
-        vertex_colors=True,
-        scene_lights=True,
-        prepare_for_3d_printing=True,
-    )
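
The deleted helper exported the extruded mesh three times (GLB, STL, OBJ) and, with zip_outputs enabled, wrapped each export in its own zip archive containing exactly one mesh file. Below is a minimal sketch of reading one of those archives back, assuming the function was called with zip_outputs=True; the load_zipped_mesh helper is hypothetical and not part of the deleted code.

import tempfile
import zipfile

import trimesh


def load_zipped_mesh(path_zip):
    """Unpack a single-file mesh archive produced by extrude_depth_3d."""
    with tempfile.TemporaryDirectory() as tmp:
        with zipfile.ZipFile(path_zip) as zf:
            # Each archive holds exactly one mesh file, e.g. *.glb.
            path_mesh = zf.extract(zf.namelist()[0], tmp)
        # trimesh infers the format from the extension; a .glb
        # typically loads as a trimesh.Scene.
        return trimesh.load(path_mesh)


if __name__ == "__main__":
    scene = load_zipped_mesh("einstein_3d_out.glb.zip")
    # Extents should span about 100 units, matching output_model_scale=100.
    print(scene.bounds)
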
files/basrelief/coin.jpg DELETED

Git LFS Details

  • SHA256: d5295c5cb301ef73099e3dd91f80916e7b013f6b04d75759df57081b16a18adc
  • Pointer size: 131 Bytes
  • Size of remote file: 632 kB
files/basrelief/einstein.jpg DELETED

Git LFS Details

  • SHA256: d4a4543c0fffb2ca5ea3c17e23e88fcfcf66eae8b487173fbc5c25d0d614bdb6
  • Pointer size: 131 Bytes
  • Size of remote file: 367 kB
files/basrelief/food.jpeg DELETED

Git LFS Details

  • SHA256: a26151050a574b0dc0014e9c4806da3d6f6bc1297ee1035a16b9ace007a179af
  • Pointer size: 132 Bytes
  • Size of remote file: 1.04 MB
files/image/arc.jpeg DELETED

Git LFS Details

  • SHA256: f888e3770134e2073459026f58c568f7cf30524dd26a9182413c84b709e1b63e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.01 MB
files/image/bee.jpg DELETED

Git LFS Details

  • SHA256: 7643ccdbc9550e2bf6ebdd5c768db5bc829ef719b0d1a91b4f6f9184b52f4751
  • Pointer size: 131 Bytes
  • Size of remote file: 146 kB
files/image/berries.jpeg DELETED

Git LFS Details

  • SHA256: dac1411ea48cf83b7a59c6424032f95b2ff496b3a98cdccf168bbed1c8f0aed4
  • Pointer size: 131 Bytes
  • Size of remote file: 940 kB
files/image/butterfly.jpeg DELETED

Git LFS Details

  • SHA256: e0364b8eec31d2c113c15c2b6c892754130765e8e2c960adc87d51ca5c0ea8f9
  • Pointer size: 131 Bytes
  • Size of remote file: 878 kB
files/image/cat.jpg DELETED

Git LFS Details

  • SHA256: 794796a86e56a4b372287661dc934daa2d15e988d01afe88afc50b32644c007a
  • Pointer size: 131 Bytes
  • Size of remote file: 236 kB
files/image/concert.jpeg DELETED

Git LFS Details

  • SHA256: fc746e234cb8a3e483999ee4c4f4d22b4e6c48cb2655eaa47c0936f3a37b61dc
  • Pointer size: 131 Bytes
  • Size of remote file: 420 kB
files/image/dog.jpeg DELETED

Git LFS Details

  • SHA256: c932a965dfe63c8c6dbc1bb48f7ea245a6a6dd2fb40fd243545e908b3aa7aa62
  • Pointer size: 131 Bytes
  • Size of remote file: 672 kB
files/image/doughnuts.jpeg DELETED

Git LFS Details

  • SHA256: 2ede4170b4a17f0c076c1a336eb4d3c03d64688997a986e3a8101972016b799a
  • Pointer size: 131 Bytes
  • Size of remote file: 607 kB
files/image/einstein.jpg DELETED

Git LFS Details

  • SHA256: d4a4543c0fffb2ca5ea3c17e23e88fcfcf66eae8b487173fbc5c25d0d614bdb6
  • Pointer size: 131 Bytes
  • Size of remote file: 367 kB
files/image/food.jpeg DELETED

Git LFS Details

  • SHA256: a26151050a574b0dc0014e9c4806da3d6f6bc1297ee1035a16b9ace007a179af
  • Pointer size: 132 Bytes
  • Size of remote file: 1.04 MB
files/image/glasses.jpeg DELETED

Git LFS Details

  • SHA256: de8c0c20adb7c187357c21e467d3f178888574962027cdd366c390b63913ffec
  • Pointer size: 131 Bytes
  • Size of remote file: 677 kB
files/image/house.jpg DELETED

Git LFS Details

  • SHA256: 4087027e84a6323099fc839fd0b6816fd614814e92d12df21051cff3ed472819
  • Pointer size: 133 Bytes
  • Size of remote file: 14.9 MB
files/image/lake.jpeg DELETED

Git LFS Details

  • SHA256: 181dc0f684f0f3b94bc4bec829becd3dec817f69032731edf55ee8370c6898f0
  • Pointer size: 132 Bytes
  • Size of remote file: 1.03 MB
files/image/marigold.jpeg DELETED

Git LFS Details

  • SHA256: 575c1a7bc1199d86b5ec305b4efc12286842dee4a189e8699dcf8a6d0276807c
  • Pointer size: 131 Bytes
  • Size of remote file: 416 kB
files/image/portrait_1.jpeg DELETED

Git LFS Details

  • SHA256: 76e3ad74311975f0db43cdebd4202d1464e19b6950cc3e7c5aa0a160f95493c3
  • Pointer size: 131 Bytes
  • Size of remote file: 506 kB
files/image/portrait_2.jpeg DELETED

Git LFS Details

  • SHA256: 805ad1127b0d9d09068df70e3ab7aa7450ff802fa5464db8430787dfee1ec6a0
  • Pointer size: 131 Bytes
  • Size of remote file: 525 kB
files/image/pumpkins.jpg DELETED

Git LFS Details

  • SHA256: 92f03bc05dc882231bce735f2afb8c27eb9d0616166abe3794b39ff24314fd0a
  • Pointer size: 133 Bytes
  • Size of remote file: 11.3 MB
files/image/puzzle.jpeg DELETED

Git LFS Details

  • SHA256: 60b66432124a0936c6143301a9f9b793af4184bc9340c567d11fdd5a22cc98cc
  • Pointer size: 131 Bytes
  • Size of remote file: 374 kB
files/image/road.jpg DELETED

Git LFS Details

  • SHA256: 58bb01aea37f6e1206260eddb6d003589d779e8b3fb3ef0a0f1e2e38a8fa3925
  • Pointer size: 133 Bytes
  • Size of remote file: 13.1 MB
files/image/scientists.jpg DELETED

Git LFS Details

  • SHA256: 7b164dfbc4ab6e491ce81972b8c0e076fdc4af622289d0aa3cb43ee3c2be4030
  • Pointer size: 131 Bytes
  • Size of remote file: 444 kB
files/image/surfboards.jpeg DELETED

Git LFS Details

  • SHA256: 326f9ffd3b85b29b971205eb87c2d0c9b5e4409b496be1eb961b46d5f7c5d6c6
  • Pointer size: 132 Bytes
  • Size of remote file: 1.16 MB
files/image/surfer.jpeg DELETED

Git LFS Details

  • SHA256: 52827abf2c3951b752d4e58c88fff7ab907672c58fda70b813df3922650c7495
  • Pointer size: 132 Bytes
  • Size of remote file: 1.01 MB
files/image/swings.jpg DELETED

Git LFS Details

  • SHA256: cae2ac669c948313eae8aca53017f10b64b42f87c53b9c34639962b218fdf1f1
  • Pointer size: 131 Bytes
  • Size of remote file: 353 kB
files/image/switzerland.jpeg DELETED

Git LFS Details

  • SHA256: 81e35ba90f7736167ea3e8a0a58f932ecded07b00b012a5bd7df5dabbe0eb3ce
  • Pointer size: 131 Bytes
  • Size of remote file: 847 kB
files/image/teamwork.jpeg DELETED

Git LFS Details

  • SHA256: 3cd48af8f3db4d89760cd6f40f2716570e697ae74a9bd88ed1ba36c0e68326b3
  • Pointer size: 131 Bytes
  • Size of remote file: 700 kB
files/image/wave.jpeg DELETED

Git LFS Details

  • SHA256: 7f14e77f7990d75104d6e3447077eb176d6437c58f5fb0fffcdb6015193b2d03
  • Pointer size: 132 Bytes
  • Size of remote file: 1.07 MB
files/video/cab.mp4 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e7857328de30257e2985e0218e18e35f0dbc6ca9dd9f89b28687881f13ca0a4a
-size 3268179

files/video/elephant.mp4 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7d198ec2e3e5a308c5eeb18c9f3a882f6c5812d329d9e8497e1bf79ff466dd84
-size 3078416

files/video/obama.mp4 DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4aa0ac19460e0966139247cc180f98398fb11a35e3ca5c90cb70f0c4704904de
-size 955458

gradio_cached_examples/examples_bas/3D model outputs high-res/0f57994f5d6ac12c1020/food_depth_512.glb.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2343e8ab8c40539f3d4cdb6bde4af13964b989af33d1993eb2c3edd9e3822950
-size 2089619

gradio_cached_examples/examples_bas/3D model outputs high-res/127d9bcaf03fa5f41dd3/food_depth_512.stl.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:95a5d9c1fd336e1e73ed3bcd388ec980da90b4ba601ce999839ab44baca5abb3
-size 6527220

gradio_cached_examples/examples_bas/3D model outputs high-res/96a98e08d96fd47e5cc6/einstein_depth_512.obj.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a5b62efa853cbba11eba28f99307b40dfca300ec8047f2d778db92b3d3417d81
-size 5977474

gradio_cached_examples/examples_bas/3D model outputs high-res/a17995f3d4750a0e0bbc/food_depth_512.obj.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2fe92c1762da36b2d9fbe1419d5f91fc9cc64f739de6dc2b122fb1f1c6ca3e7e
-size 4044632

gradio_cached_examples/examples_bas/3D model outputs high-res/b0b93bdcbedf077307ba/coin_depth_512.stl.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ac5e87a94275d6df74d5a7a42672cb97fbf29a3c2a6caf8dbc3b979641fea560
-size 7434540

gradio_cached_examples/examples_bas/3D model outputs high-res/c7499e9097e58b706e51/einstein_depth_512.glb.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e2b7641a5d61736e66868e895d1c97b0f15458d694e1164cc484bdea961a309f
-size 2977145

gradio_cached_examples/examples_bas/3D model outputs high-res/ebe8a8d03fbc1a1fc2bd/coin_depth_512.glb.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:abb6853f1086ad891ffd7407f3b913f2019f6c28256065f1ae94145c374ec220
-size 3521996

gradio_cached_examples/examples_bas/3D model outputs high-res/ee9ee048f590c0c9a2c8/einstein_depth_512.stl.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e5bf0df4560b958ab8f45cda1cd86a989d0506fbc1ab4c7ff37037a200ac4fd4
-size 8353042

gradio_cached_examples/examples_bas/3D model outputs high-res/fbaa26ffc2eb3654c177/coin_depth_512.obj.zip DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6bce06188d1a22e3a6b6b339cca62a2ea59da00925ea6c19bc580bf56250323a
-size 5631807

gradio_cached_examples/examples_bas/3D preview low-res relief highlight/78ff2a583036eab8fe9b/coin_depth_256.glb DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1ae49b26ba4187dc3704cc00040c5671f1088a131f4796768fa86ec3eb67c1a1
-size 2369864

gradio_cached_examples/examples_bas/3D preview low-res relief highlight/8feb5fe1e8941c880c40/food_depth_256.glb DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fb7df74b49b34e916945c7bd31b45d6359db2ebb3d0bbb0e2fdd9d9222fb0816
-size 1598968

gradio_cached_examples/examples_bas/3D preview low-res relief highlight/bb26fd8a9d7890806329/einstein_depth_256.glb DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:88372996a41fccdc6e03b77edb2542fb75aaa1623b25c3cbf4a65de61131d0d3
-size 2397708

gradio_cached_examples/examples_bas/log.csv DELETED
@@ -1,4 +0,0 @@
-"3D preview (low-res, relief highlight)",3D model outputs (high-res),flag,username,timestamp
-"{""path"": ""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/78ff2a583036eab8fe9b/coin_depth_256.glb"", ""url"": ""/file=/tmp/gradio/75f3f8661319bbfa07a73e3c6aad5381af2bb662/coin_depth_256.glb"", ""size"": null, ""orig_name"": ""coin_depth_256.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","[{""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/ebe8a8d03fbc1a1fc2bd/coin_depth_512.glb.zip"", ""url"": ""/file=/tmp/gradio/8d501a8d9351d4ac105f330018213f88c8ab46e2/coin_depth_512.glb.zip"", ""size"": 3521996, ""orig_name"": ""coin_depth_512.glb.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/b0b93bdcbedf077307ba/coin_depth_512.stl.zip"", ""url"": ""/file=/tmp/gradio/67269f9efe556613199b0e4e587db71ecaf273e7/coin_depth_512.stl.zip"", ""size"": 7434540, ""orig_name"": ""coin_depth_512.stl.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/fbaa26ffc2eb3654c177/coin_depth_512.obj.zip"", ""url"": ""/file=/tmp/gradio/3e0dc5d754e113c9553b04644fe50279a65ad85d/coin_depth_512.obj.zip"", ""size"": 5631807, ""orig_name"": ""coin_depth_512.obj.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}]",,,2024-06-02 01:52:32.122923
-"{""path"": ""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/bb26fd8a9d7890806329/einstein_depth_256.glb"", ""url"": ""/file=/tmp/gradio/d72948c76f50b644cfd2bc4f2eb68ec4d1dfd6b3/einstein_depth_256.glb"", ""size"": null, ""orig_name"": ""einstein_depth_256.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","[{""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/c7499e9097e58b706e51/einstein_depth_512.glb.zip"", ""url"": ""/file=/tmp/gradio/1d0bac2ed809c2c3d7ffecf0a40ca8f8398521da/einstein_depth_512.glb.zip"", ""size"": 2977145, ""orig_name"": ""einstein_depth_512.glb.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/ee9ee048f590c0c9a2c8/einstein_depth_512.stl.zip"", ""url"": ""/file=/tmp/gradio/3eb2701225d3b2362b3fc702c7b02bf2d9072308/einstein_depth_512.stl.zip"", ""size"": 8353042, ""orig_name"": ""einstein_depth_512.stl.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/96a98e08d96fd47e5cc6/einstein_depth_512.obj.zip"", ""url"": ""/file=/tmp/gradio/063a0b23c678e16f689b0bd8e3b6784e084a283f/einstein_depth_512.obj.zip"", ""size"": 5977474, ""orig_name"": ""einstein_depth_512.obj.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}]",,,2024-06-02 01:52:37.086710
-"{""path"": ""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/8feb5fe1e8941c880c40/food_depth_256.glb"", ""url"": ""/file=/tmp/gradio/32d5f81d5bff33dcbf73bf5b7f46e23f9f73e0e3/food_depth_256.glb"", ""size"": null, ""orig_name"": ""food_depth_256.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","[{""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/0f57994f5d6ac12c1020/food_depth_512.glb.zip"", ""url"": ""/file=/tmp/gradio/c7790f4982c192c3fe02d99143d293d0c6c28bc3/food_depth_512.glb.zip"", ""size"": 2089619, ""orig_name"": ""food_depth_512.glb.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/127d9bcaf03fa5f41dd3/food_depth_512.stl.zip"", ""url"": ""/file=/tmp/gradio/cdfe46cad85481cdf6472efdddfdc6cca3fad3f4/food_depth_512.stl.zip"", ""size"": 6527220, ""orig_name"": ""food_depth_512.stl.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/a17995f3d4750a0e0bbc/food_depth_512.obj.zip"", ""url"": ""/file=/tmp/gradio/9f3cac3ebaef08375d5558783cb66009842b5a38/food_depth_512.obj.zip"", ""size"": 4044632, ""orig_name"": ""food_depth_512.obj.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}]",,,2024-06-02 01:52:42.365773