narain committed · Commit 122fab2 · 1 Parent(s): b2ac73a
add application
app.py ADDED
@@ -0,0 +1,65 @@
import gradio as gr
import cv2
import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForDepthEstimation

# Load the depth estimation model
image_processor = AutoImageProcessor.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
depth_model = AutoModelForDepthEstimation.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")

def apply_blur(image, blur_type, blur_strength, depth_threshold):
    # gr.Image supplies an RGB numpy array, so no colour conversion is needed
    img = image

    # Depth estimation
    pil_image = Image.fromarray(img)
    inputs = image_processor(images=pil_image, return_tensors="pt")

    with torch.no_grad():
        outputs = depth_model(**inputs)
        predicted_depth = outputs.predicted_depth

    # Interpolate the depth map back to the original image size
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=img.shape[:2],
        mode="bicubic",
        align_corners=False,
    )

    # Build a 3-channel mask: 255 where the predicted depth value is below the threshold
    mask = prediction[0, 0, :, :].detach().cpu().numpy() < depth_threshold
    mask = mask.astype(np.uint8)
    mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2) * 255

    # Apply blur based on the selected type
    if blur_type == "Gaussian":
        blurred_image = cv2.GaussianBlur(img, (0, 0), sigmaX=blur_strength)
    elif blur_type == "Lens":
        # Simulate lens blur using a larger, odd-sized kernel
        kernel_size = int(blur_strength * 2) * 2 + 1
        blurred_image = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    else:
        # No blur type selected: fall back to the original image
        blurred_image = img

    # Combine the blurred and original images using the mask
    output = np.where(mask == 255, img, blurred_image)

    return output

# Define the Gradio interface
iface = gr.Interface(
    fn=apply_blur,
    inputs=[
        gr.Image(label="Input Image"),
        gr.Radio(["Gaussian", "Lens"], label="Blur Type"),
        gr.Slider(1, 30, value=15, step=1, label="Blur Strength"),
        gr.Slider(1, 10, value=3, step=0.1, label="Depth Threshold"),
    ],
    outputs=gr.Image(label="Output Image"),
    title="Image Segmentation and Blurring",
    description="Upload an image and apply Gaussian or Lens blur to the background based on depth estimation.",
)

# Launch the app
iface.launch()
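
For quick experimentation outside the Gradio UI, apply_blur can also be exercised directly on a numpy array. The sketch below is not part of the commit; it assumes apply_blur (and the models loaded above) are already in scope, and the file names example.jpg and blurred.jpg are placeholders. It converts between OpenCV's BGR ordering and the RGB arrays the function expects.

import cv2

# Minimal local test of apply_blur (file names are placeholders)
bgr = cv2.imread("example.jpg")              # OpenCV loads images as BGR
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)   # match the RGB arrays Gradio passes in
result = apply_blur(rgb, "Lens", blur_strength=15, depth_threshold=3)
cv2.imwrite("blurred.jpg", cv2.cvtColor(result, cv2.COLOR_RGB2BGR))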