blanchon committed on
Commit
e4abc7b
·
1 Parent(s): 9f7976a

Fix resize black gap

Browse files
Files changed (1) hide show
  1. app.py +3 -16
app.py CHANGED
@@ -13,6 +13,7 @@ DEVICE = "cuda"
13
 
14
  MAX_SEED = np.iinfo(np.int32).max
15
  FIXED_DIMENSION = 900
 
16
 
17
  SYSTEM_PROMPT = r"""This two-panel split-frame image showcases a furniture in as a product shot versus styled in a room.
18
  [LEFT] standalone product shot image the furniture on a white background.
@@ -46,15 +47,6 @@ else:
46
  pipe.to(DEVICE)
47
 
48
 
49
- def calculate_optimal_dimensions(image: Image.Image) -> tuple[int, int]:
50
- width, height = image.size
51
- # Ensure dimensions are multiples of 16
52
- width = (width // 16) * 16
53
- height = (height // 16) * 16
54
-
55
- return int(width), int(height)
56
-
57
-
58
  @spaces.GPU
59
  def infer(
60
  furniture_image: Image.Image,
@@ -123,11 +115,6 @@ def infer(
123
  )
124
  mask.paste(_furniture_mask, (0, 0))
125
  mask.paste(_room_mask, (FIXED_DIMENSION, 0), _room_mask)
126
-
127
- width, height = calculate_optimal_dimensions(image)
128
- # Resize the image and mask to the optimal dimensions for the VAe
129
- image = image.resize((width, height))
130
- mask = mask.resize((width, height))
131
  # Invert the mask
132
  mask = ImageOps.invert(mask)
133
  # Blur the mask
@@ -142,8 +129,8 @@ def infer(
142
  prompt=[prompt] * batch_size,
143
  image=[image] * batch_size,
144
  mask_image=[mask] * batch_size,
145
- height=height,
146
- width=width,
147
  guidance_scale=guidance_scale,
148
  num_inference_steps=num_inference_steps,
149
  generator=torch.Generator("cpu").manual_seed(seed),
 
13
 
14
  MAX_SEED = np.iinfo(np.int32).max
15
  FIXED_DIMENSION = 900
16
+ FIXED_DIMENSION = (FIXED_DIMENSION // 16) * 16
17
 
18
  SYSTEM_PROMPT = r"""This two-panel split-frame image showcases a furniture in as a product shot versus styled in a room.
19
  [LEFT] standalone product shot image the furniture on a white background.
 
47
  pipe.to(DEVICE)
48
 
49
 
 
 
 
 
 
 
 
 
 
50
  @spaces.GPU
51
  def infer(
52
  furniture_image: Image.Image,
 
115
  )
116
  mask.paste(_furniture_mask, (0, 0))
117
  mask.paste(_room_mask, (FIXED_DIMENSION, 0), _room_mask)
 
 
 
 
 
118
  # Invert the mask
119
  mask = ImageOps.invert(mask)
120
  # Blur the mask
 
129
  prompt=[prompt] * batch_size,
130
  image=[image] * batch_size,
131
  mask_image=[mask] * batch_size,
132
+ height=FIXED_DIMENSION,
133
+ width=FIXED_DIMENSION * 2,
134
  guidance_scale=guidance_scale,
135
  num_inference_steps=num_inference_steps,
136
  generator=torch.Generator("cpu").manual_seed(seed),