import gradio as gr
import torch
import numpy as np
import glob
import time
from pathlib import Path
from PIL import Image, ImageOps
from transformers import DPTImageProcessor, DPTForDepthEstimation
from autostereogram.converter import StereogramConverter

# Load the DPT depth-estimation model and the autostereogram converter once at startup.
feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
stereo_converter = StereogramConverter()


def process_image(image_path):
    print("Processing image:", image_path)
    last_time = time.time()

    # Load the image and resize it to 1280 px wide, preserving the aspect ratio.
    image_raw = Image.open(Path(image_path))
    image = image_raw.resize(
        (1280, int(1280 * image_raw.size[1] / image_raw.size[0])),
        Image.Resampling.LANCZOS,
    )

    # Prepare the image for the model.
    encoding = feature_extractor(image, return_tensors="pt")

    # Forward pass.
    with torch.no_grad():
        outputs = model(**encoding)
        predicted_depth = outputs.predicted_depth

    # Interpolate the prediction back to the resized image dimensions.
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=image.size[::-1],
        mode="bicubic",
        align_corners=False,
    ).squeeze()
    output = prediction.cpu().numpy()

    # Normalize the depth map to 8-bit grayscale and pad it to 1280x720.
    depth_image = (output * 255 / np.max(output)).astype("uint8")
    depth_image_padded = np.array(
        ImageOps.pad(Image.fromarray(depth_image), (1280, 720))
    )

    print(f"Depth estimation took {time.time() - last_time:.2f}s")
    return depth_image_padded
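
# A minimal sketch of how the padded depth map could be turned into the Magic Eye
# image with the StereogramConverter instantiated above; the method name below is
# an assumption and may differ in pystereogram:
#
#     stereo_image = stereo_converter.convert_depth_to_stereogram(depth_image_padded)
#     stereo_pil = Image.fromarray(stereo_image.astype(np.uint8))
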
examples_images = [[f] for f in sorted(glob.glob("examples/*.jpg"))]

with gr.Blocks() as blocks:
    gr.Markdown(
        """
## Depth Image to Autostereogram (Magic Eye)
This demo is a variation on the original [DPT Demo](https://huggingface.co/spaces/nielsr/dpt-depth-estimation).
It performs zero-shot depth estimation on an image, then uses [pystereogram](https://github.com/yxiao1996/pystereogram)
to generate the autostereogram (Magic Eye).
<base target="_blank">
"""
    )
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="filepath", label="Input Image")
            button = gr.Button("Predict")
        with gr.Column():
            predicted_depth = gr.Image(label="Predicted Depth", type="pil")
    with gr.Row():
        autostereogram = gr.Image(label="Autostereogram", type="pil")
    with gr.Row():
        with gr.Column():
            file_download = gr.File(label="Download Image")
    with gr.Row():
        gr.Examples(
            examples=examples_images,
            fn=process_image,
            inputs=[input_image],
            outputs=[predicted_depth],
            cache_examples=True,
        )
    button.click(
        fn=process_image,
        inputs=[input_image],
        outputs=[predicted_depth],
    )

blocks.launch(debug=True)