Set batch size to 4
app.py
CHANGED
@@ -12,7 +12,8 @@ from PIL import Image, ImageFilter, ImageOps
 DEVICE = "cuda"
 
 MAX_SEED = np.iinfo(np.int32).max
-FIXED_DIMENSION = 900
+# FIXED_DIMENSION = 900
+FIXED_DIMENSION = 512 + (512 // 2)
 FIXED_DIMENSION = (FIXED_DIMENSION // 16) * 16
 
 SYSTEM_PROMPT = r"""This two-panel split-frame image showcases a furniture in as a product shot versus styled in a room.
@@ -119,14 +120,14 @@ def infer(
     mask = ImageOps.invert(mask)
     # Blur the mask
     mask = mask.filter(ImageFilter.GaussianBlur(radius=10))
-    # Convert to
-    mask = mask.convert("
+    # Convert to 3 channel
+    mask = mask.convert("RGB")
 
     if randomize_seed:
         seed = secrets.randbelow(MAX_SEED)
 
     prompt = prompt + ".\n" + SYSTEM_PROMPT if prompt else SYSTEM_PROMPT
-    batch_size =
+    batch_size = 4
     results_images = pipe(
         prompt=[prompt] * batch_size,
         image=[image] * batch_size,
@@ -138,6 +139,8 @@ def infer(
         generator=torch.Generator("cpu").manual_seed(seed),
     )["images"]
 
+    print(len(results_images))
+
     cropped_images = [
         image.crop((FIXED_DIMENSION, 0, FIXED_DIMENSION * 2, FIXED_DIMENSION))
         for image in results_images
@@ -196,7 +199,6 @@ with gr.Blocks(css=css) as demo:
         show_label=False,
         columns=2,
         height=600,
-        preview=True,
     )
 
     with gr.Accordion("Advanced Settings", open=False):
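
For reference, a minimal sketch of the arithmetic behind the changed constant and of the crop that follows it in infer(). It assumes, as the crop box implies, that the pipeline returns a two-panel image of size (2 * FIXED_DIMENSION, FIXED_DIMENSION); the blank placeholder image is made up for illustration.

from PIL import Image

# New value from this commit: 512 + (512 // 2) = 768, already a multiple of 16,
# so the rounding line leaves it unchanged (the old 900 would round down to 896).
FIXED_DIMENSION = 512 + (512 // 2)
FIXED_DIMENSION = (FIXED_DIMENSION // 16) * 16  # still 768

# Assumed two-panel output: product shot on the left, styled room on the right.
two_panel = Image.new("RGB", (FIXED_DIMENSION * 2, FIXED_DIMENSION))

# Same crop box as in infer(): keep only the right-hand panel.
right_panel = two_panel.crop((FIXED_DIMENSION, 0, FIXED_DIMENSION * 2, FIXED_DIMENSION))
print(right_panel.size)  # (768, 768)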
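
The mask-handling lines in the second hunk correspond to this standalone PIL sequence. The invert call, the 10-pixel Gaussian blur, and the new 3-channel conversion are taken from the diff; the example mask itself is made up for illustration.

from PIL import Image, ImageFilter, ImageOps

# Made-up single-channel mask: white square on a black background.
mask = Image.new("L", (768, 768), 0)
mask.paste(255, (192, 192, 576, 576))

mask = ImageOps.invert(mask)                             # invert, as in infer()
mask = mask.filter(ImageFilter.GaussianBlur(radius=10))  # blur the mask
mask = mask.convert("RGB")                               # convert to 3 channel (new in this commit)
print(mask.mode, mask.size)  # RGB (768, 768)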