import gradio as gr
import cv2
import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForDepthEstimation
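
# Depth-based background blur demo: a monocular depth model predicts a per-pixel depth map,
# which is thresholded to decide which pixels stay sharp and which get blurred.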

# Load the Depth Anything V2 (small) checkpoint and its image processor once at startup.
image_processor = AutoImageProcessor.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
depth_model = AutoModelForDepthEstimation.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")


def apply_blur(image, blur_type, blur_strength, depth_threshold):
    # gr.Image provides the input as an RGB NumPy array by default, so it can be passed to the
    # depth model and to cv2.GaussianBlur directly; no BGR -> RGB conversion is needed.
    img = image

    # Preprocess the image and run monocular depth estimation.
    pil_image = Image.fromarray(img)
    inputs = image_processor(images=pil_image, return_tensors="pt")

    with torch.no_grad():
        outputs = depth_model(**inputs)
        predicted_depth = outputs.predicted_depth

    # Upsample the predicted depth map back to the original image resolution.
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=img.shape[:2],
        mode="bicubic",
        align_corners=False,
    )

    # Pixels whose predicted depth value falls below the threshold are kept sharp; build a
    # 3-channel 0/255 mask so it broadcasts against the colour image.
    mask = prediction[0, 0, :, :].detach().cpu().numpy() < depth_threshold
    mask = mask.astype(np.uint8)
    mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2) * 255

    # Blur the whole frame once, then composite the sharp (masked) region back on top.
    if blur_type == "Gaussian":
        blurred_image = cv2.GaussianBlur(img, (0, 0), sigmaX=blur_strength)
    elif blur_type == "Lens":
        # Approximate a lens blur with a large, odd-sized Gaussian kernel.
        kernel_size = int(blur_strength * 2) * 2 + 1
        blurred_image = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    else:
        # No blur type selected: leave the image untouched.
        blurred_image = img

    output = np.where(mask == 255, img, blurred_image)
    return output
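

# Quick local check without launching the UI (assumes an "example.jpg" sits next to this script):
#   img = np.array(Image.open("example.jpg").convert("RGB"))
#   out = apply_blur(img, "Gaussian", blur_strength=15, depth_threshold=3)
#   Image.fromarray(out).save("example_blurred.jpg")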


# Gradio UI: an image input plus controls for blur type, blur strength, and depth threshold.
iface = gr.Interface(
    fn=apply_blur,
    inputs=[
        gr.Image(label="Input Image"),
        gr.Radio(["Gaussian", "Lens"], value="Gaussian", label="Blur Type"),
        gr.Slider(1, 30, value=15, step=1, label="Blur Strength"),
        gr.Slider(1, 10, value=3, step=0.1, label="Depth Threshold"),
    ],
    outputs=gr.Image(label="Output Image"),
    title="Image Segmentation and Blurring",
    description="Upload an image and apply Gaussian or Lens blur to the background based on depth estimation.",
)

# share=True exposes the app through a temporary public Gradio link in addition to the local server.
iface.launch(share=True)