remove spaces references - run on hugging face as cpu

- README.md +2 -2
- app.py +2 -6
- thumbnail.png +3 -0
README.md
CHANGED

@@ -6,7 +6,7 @@ colorFrom: yellow
 colorTo: red
 python_version: 3.10.13
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.17.0
 app_file: app.py
 license: apache-2.0
 hf_oauth: true
@@ -15,7 +15,7 @@ tags:
 - depth
 - 3d
 thumbnail: >-
-  https://cdn-uploads.huggingface.co/production/uploads/6346595c9e5f0fe83fc60444/
+  https://cdn-uploads.huggingface.co/production/uploads/6346595c9e5f0fe83fc60444/cyAmkkrQsfDjolwuHb4ZX.png
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py
CHANGED

@@ -10,11 +10,8 @@ from transformers import DPTForDepthEstimation, DPTImageProcessor

 # Initialize the image processor and depth estimation model
 image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
-
+depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large", ignore_mismatched_sizes=True)

-import spaces
-
-@spaces.GPU(duration=90,progress=gr.Progress(track_tqdm=True))
 def process_image(image_path, resized_width=800, z_scale=208):
     """
     Processes the input image to generate a depth map and a 3D mesh reconstruction.
@@ -41,7 +38,7 @@ def process_image(image_path, resized_width=800, z_scale=208):

     # Perform depth estimation
     with torch.no_grad():
-        outputs =
+        outputs = depth_model(**encoding)
         predicted_depth = outputs.predicted_depth

     # Interpolate depth to match the image size
@@ -72,7 +69,6 @@ def process_image(image_path, resized_width=800, z_scale=208):
         torch.cuda.ipc_collect()
     return [img, gltf_path, gltf_path]

-@spaces.GPU()
 def create_3d_obj(rgb_image, raw_depth, image_path, depth=10, z_scale=200):
     """
     Creates a 3D object from RGB and depth images.
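For reference, the CPU-only inference path that remains after this commit looks roughly like the sketch below. It is assembled from the lines visible in the diff (the Intel/dpt-large processor/model pair and the depth_model(**encoding) call under torch.no_grad()); the standalone script wrapper, the example "input.jpg" path, and the interpolation/normalization details are illustrative assumptions rather than code taken from the Space.

# Minimal CPU-only sketch of the depth-estimation step kept by this commit.
# Model/processor names and the depth_model(**encoding) call come from the diff;
# the input path "input.jpg" and the post-processing below are illustrative.
import numpy as np
import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
depth_model = DPTForDepthEstimation.from_pretrained(
    "Intel/dpt-large", ignore_mismatched_sizes=True
)

img = Image.open("input.jpg").convert("RGB")      # hypothetical input image
encoding = image_processor(images=img, return_tensors="pt")

with torch.no_grad():                             # no @spaces.GPU decorator; runs on CPU
    outputs = depth_model(**encoding)
    predicted_depth = outputs.predicted_depth     # shape: (1, H', W')

# Interpolate the predicted depth back to the original image size (height, width)
depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1),
    size=img.size[::-1],
    mode="bicubic",
    align_corners=False,
).squeeze().cpu().numpy()

# Normalize to 0..255 for visualization (illustrative)
depth_map = ((depth - depth.min()) / (depth.max() - depth.min()) * 255).astype(np.uint8)

Because nothing is ever moved with .to("cuda"), both the model and the input tensors stay on the CPU, which is what dropping the @spaces.GPU decorators relies on.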
thumbnail.png
ADDED
Git LFS Details