Ariamehr committed (verified)
Commit 7677377 · 1 Parent(s): fd0b130

Update app.py

Files changed (1):
1. app.py +23 -1
app.py CHANGED
@@ -7,7 +7,7 @@ import torch
 from gradio.themes.utils import sizes
 from matplotlib import pyplot as plt
 from matplotlib.patches import Patch
-from PIL import Image
+from PIL import Image, ImageOps
 from torchvision import transforms
 
 # ----------------- HELPER FUNCTIONS ----------------- #
@@ -161,7 +161,29 @@ transform_fn = transforms.Compose(
 
 # ----------------- CORE FUNCTION ----------------- #
 
+def resize_and_pad(image: Image.Image, target_size=(768, 1024)):
+    img_ratio = image.width / image.height
+    target_ratio = target_size[0] / target_size[1]
+
+    if img_ratio > target_ratio:
+        new_width = target_size[0]
+        new_height = int(target_size[0] / img_ratio)
+    else:
+        new_height = target_size[1]
+        new_width = int(target_size[1] * img_ratio)
+
+    resized_image = image.resize((new_width, new_height), Image.ANTIALIAS)
+
+    delta_w = target_size[0] - new_width
+    delta_h = target_size[1] - new_height
+    padding = (delta_w // 2, delta_h // 2, delta_w - (delta_w // 2), delta_h - (delta_h // 2))
+    padded_image = ImageOps.expand(resized_image, padding, fill="black")
+
+    return padded_image
+
 def segment(image: Image.Image) -> Image.Image:
+    image = resize_and_pad(image, target_size=(768, 1024))
+
     input_tensor = transform_fn(image).unsqueeze(0)
     preds = run_model(input_tensor, height=image.height, width=image.width)
     mask = preds.squeeze(0).cpu().numpy()
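
A minimal standalone sketch of the letterbox resize this commit introduces, for trying the helper outside the app. The function body mirrors the diff; the "example.jpg" path is a hypothetical placeholder, and the resample constant is Image.Resampling.LANCZOS, since Pillow 10 removed the Image.ANTIALIAS name used in the committed code.

# letterbox_sketch.py -- standalone sketch of the resize-and-pad step above
from PIL import Image, ImageOps


def resize_and_pad(image: Image.Image, target_size=(768, 1024)) -> Image.Image:
    # Scale the image to fit inside target_size while preserving aspect ratio.
    img_ratio = image.width / image.height
    target_ratio = target_size[0] / target_size[1]

    if img_ratio > target_ratio:
        # Wider than the target box: width is the limiting dimension.
        new_width = target_size[0]
        new_height = int(target_size[0] / img_ratio)
    else:
        # Taller (or same ratio): height is the limiting dimension.
        new_height = target_size[1]
        new_width = int(target_size[1] * img_ratio)

    # Image.Resampling.LANCZOS is the current Pillow spelling of ANTIALIAS.
    resized = image.resize((new_width, new_height), Image.Resampling.LANCZOS)

    # Pad with black bars so the output is exactly target_size, content centered.
    delta_w = target_size[0] - new_width
    delta_h = target_size[1] - new_height
    padding = (delta_w // 2, delta_h // 2, delta_w - delta_w // 2, delta_h - delta_h // 2)
    return ImageOps.expand(resized, padding, fill="black")


if __name__ == "__main__":
    img = Image.open("example.jpg").convert("RGB")  # hypothetical input path
    print(resize_and_pad(img).size)  # always (768, 1024), regardless of input shape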