Spaces: Running on Zero
fix ui change not working
app.py CHANGED
@@ -84,47 +84,67 @@ def generate_single_image(
 @spaces.GPU(duration=80)
 def generate_arena_images(
     prompt,
-    …
+    negative_prompt_A,
+    negative_prompt_B,
+    num_inference_steps_A,
+    num_inference_steps_B,
+    height_A,
+    height_B,
+    width_A,
+    width_B,
+    guidance_scale_A,
+    guidance_scale_B,
+    seed_A,
+    seed_B,
+    num_images_per_prompt_A,
+    num_images_per_prompt_B,
     model_choice_A,
     model_choice_B,
     use_same_settings,
     progress=gr.Progress(track_tqdm=True),
 ):
-    if …
+    if seed_A == 0:
+        seed_A = random.randint(1, 2**32 - 1)
+    if seed_B == 0:
+        seed_B = random.randint(1, 2**32 - 1)

-    …
+    generator_A = torch.Generator().manual_seed(seed_A)
+    generator_B = torch.Generator().manual_seed(seed_B)
+
+    # Apply settings based on use_same_settings
+    if use_same_settings:
+        num_inference_steps_B = num_inference_steps_A
+        height_B = height_A
+        width_B = width_A
+        guidance_scale_B = guidance_scale_A
+        negative_prompt_B = negative_prompt_A
+        seed_B = seed_A
+        num_images_per_prompt_B = num_images_per_prompt_A

     # Generate images for both models
     images_A = generate_single_image(
         prompt,
-        …
+        negative_prompt_A,
+        num_inference_steps_A,
+        height_A,
+        width_A,
+        guidance_scale_A,
+        seed_A,
+        num_images_per_prompt_A,
         model_choice_A,
-        …
+        generator_A,
     )
     images_B = generate_single_image(
         prompt,
-        …
+        negative_prompt_B,
+        num_inference_steps_B,
+        height_B,
+        width_B,
+        guidance_scale_B,
+        seed_B,
+        num_images_per_prompt_B,
         model_choice_B,
-        …
+        generator_B,
     )

     return images_A, images_B
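For orientation: the two call sites above pass ten positional arguments into generate_single_image. That function's definition is not part of this hunk, so the sketch below is only a minimal signature compatible with those calls; the PIPELINES lookup and the diffusers-style pipeline call in the body are assumptions for illustration, not code from this commit.

# Sketch only: a signature matching the call sites above (names other than the
# parameters are hypothetical and not taken from this diff).
def generate_single_image(
    prompt,
    negative_prompt,
    num_inference_steps,
    height,
    width,
    guidance_scale,
    seed,
    num_images_per_prompt,
    model_choice,
    generator,
):
    # seed is already baked into `generator` by the caller, so it is unused here.
    pipe = PIPELINES[model_choice]  # hypothetical mapping: model name -> loaded pipeline
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        num_images_per_prompt=num_images_per_prompt,
        generator=generator,
    )
    return output.images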
@@ -216,7 +236,7 @@ with gr.Blocks(css=css) as demo:
     with gr.Accordion("Advanced options", open=False):
         use_same_settings = gr.Checkbox(label='Use same settings for both models', value=True)

-        # …
+        # UI elements for shared settings
         with gr.Row(visible=True):
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
@@ -276,122 +296,118 @@ with gr.Blocks(css=css) as demo:
                value=2,
            )

-        # …
-        with gr.Row(visible=False):
-            negative_prompt_A = gr.Textbox(
-                label="Negative Prompt (Model A)",
-                info="Describe what you don't want in the image",
-                value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
-                placeholder="Ugly, bad anatomy...",
-            )
-            negative_prompt_B = gr.Textbox(
-                label="Negative Prompt (Model B)",
-                info="Describe what you don't want in the image",
-                value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
-                placeholder="Ugly, bad anatomy...",
-            )
-        with gr.Row(visible=False):
-            num_inference_steps_A = gr.Slider(
-                label="Number of Inference Steps (Model A)",
-                info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
-                minimum=1,
-                maximum=50,
-                value=25,
-                step=1,
-            )
-            num_inference_steps_B = gr.Slider(
-                label="Number of Inference Steps (Model B)",
-                info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
-                minimum=1,
-                maximum=50,
-                value=25,
-                step=1,
-            )
-        with gr.Row(visible=False):
-            width_A = gr.Slider(
-                label="Width (Model A)",
-                info="Width of the Image",
-                minimum=256,
-                maximum=1344,
-                step=32,
-                value=1024,
-            )
-            width_B = gr.Slider(
-                label="Width (Model B)",
-                info="Width of the Image",
-                minimum=256,
-                maximum=1344,
-                step=32,
-                value=1024,
-            )
-        with gr.Row(visible=False):
-            height_A = gr.Slider(
-                label="Height (Model A)",
-                info="Height of the Image",
-                minimum=256,
-                maximum=1344,
-                step=32,
-                value=1024,
-            )
-            height_B = gr.Slider(
-                label="Height (Model B)",
-                info="Height of the Image",
-                minimum=256,
-                maximum=1344,
-                step=32,
-                value=1024,
-            )
-        with gr.Row(visible=False):
-            guidance_scale_A = gr.Slider(
-                label="Guidance Scale (Model A)",
-                info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
-                minimum=0.0,
-                maximum=10.0,
-                value=7.5,
-                step=0.1,
-            )
-            guidance_scale_B = gr.Slider(
-                label="Guidance Scale (Model B)",
-                info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
-                minimum=0.0,
-                maximum=10.0,
-                value=7.5,
-                step=0.1,
-            )
-        with gr.Row(visible=False):
-            seed_A = gr.Slider(
-                value=42,
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                label="Seed (Model A)",
-                info="A starting point to initiate the generation process, put 0 for a random one",
-            )
-            seed_B = gr.Slider(
-                value=42,
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                label="Seed (Model B)",
-                info="A starting point to initiate the generation process, put 0 for a random one",
-            )
+        # UI elements for separate settings (hidden by default)
         with gr.Row(visible=False):
-            …
+            with gr.Column(scale=1):
+                negative_prompt_A = gr.Textbox(
+                    label="Negative Prompt (Model A)",
+                    info="Describe what you don't want in the image",
+                    value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                    placeholder="Ugly, bad anatomy...",
+                )
+                num_inference_steps_A = gr.Slider(
+                    label="Number of Inference Steps (Model A)",
+                    info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
+                    minimum=1,
+                    maximum=50,
+                    value=25,
+                    step=1,
+                )
+                width_A = gr.Slider(
+                    label="Width (Model A)",
+                    info="Width of the Image",
+                    minimum=256,
+                    maximum=1344,
+                    step=32,
+                    value=1024,
+                )
+                height_A = gr.Slider(
+                    label="Height (Model A)",
+                    info="Height of the Image",
+                    minimum=256,
+                    maximum=1344,
+                    step=32,
+                    value=1024,
+                )
+                guidance_scale_A = gr.Slider(
+                    label="Guidance Scale (Model A)",
+                    info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
+                    minimum=0.0,
+                    maximum=10.0,
+                    value=7.5,
+                    step=0.1,
+                )
+                seed_A = gr.Slider(
+                    value=42,
+                    minimum=0,
+                    maximum=MAX_SEED,
+                    step=1,
+                    label="Seed (Model A)",
+                    info="A starting point to initiate the generation process, put 0 for a random one",
+                )
+                num_images_per_prompt_A = gr.Slider(
+                    label="Images Per Prompt (Model A)",
+                    info="Number of Images to generate with the settings",
+                    minimum=1,
+                    maximum=4,
+                    step=1,
+                    value=2,
+                )
+            with gr.Column(scale=1):
+                negative_prompt_B = gr.Textbox(
+                    label="Negative Prompt (Model B)",
+                    info="Describe what you don't want in the image",
+                    value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                    placeholder="Ugly, bad anatomy...",
+                )
+                num_inference_steps_B = gr.Slider(
+                    label="Number of Inference Steps (Model B)",
+                    info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
+                    minimum=1,
+                    maximum=50,
+                    value=25,
+                    step=1,
+                )
+                width_B = gr.Slider(
+                    label="Width (Model B)",
+                    info="Width of the Image",
+                    minimum=256,
+                    maximum=1344,
+                    step=32,
+                    value=1024,
+                )
+                height_B = gr.Slider(
+                    label="Height (Model B)",
+                    info="Height of the Image",
+                    minimum=256,
+                    maximum=1344,
+                    step=32,
+                    value=1024,
+                )
+                guidance_scale_B = gr.Slider(
+                    label="Guidance Scale (Model B)",
+                    info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
+                    minimum=0.0,
+                    maximum=10.0,
+                    value=7.5,
+                    step=0.1,
+                )
+                seed_B = gr.Slider(
+                    value=42,
+                    minimum=0,
+                    maximum=MAX_SEED,
+                    step=1,
+                    label="Seed (Model B)",
+                    info="A starting point to initiate the generation process, put 0 for a random one",
+                )
+                num_images_per_prompt_B = gr.Slider(
+                    label="Images Per Prompt (Model B)",
+                    info="Number of Images to generate with the settings",
+                    minimum=1,
+                    maximum=4,
+                    step=1,
+                    value=2,
+                )

     gr.Examples(
         examples=examples,
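The shared-settings row above is created with visible=True and the per-model row with visible=False, but the diff does not show how visibility is switched when the "Use same settings for both models" checkbox changes. The sketch below shows how that wiring is typically done in Gradio; the names toggle_settings, shared_settings_row, and separate_settings_row are placeholders and not identifiers confirmed by this commit.

# Illustrative only: toggling between the shared row and the per-model row.
# The two Row objects would need to be bound to variables, e.g.
# `with gr.Row(visible=True) as shared_settings_row:`.
def toggle_settings(use_same):
    return (
        gr.update(visible=use_same),       # shared-settings row
        gr.update(visible=not use_same),   # per-model settings row
    )

use_same_settings.change(
    fn=toggle_settings,
    inputs=use_same_settings,
    outputs=[shared_settings_row, separate_settings_row],
)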
@@ -408,13 +424,20 @@ with gr.Blocks(css=css) as demo:
         fn=generate_arena_images,
         inputs=[
             prompt,
-            …
+            negative_prompt_A,
+            negative_prompt_B,
+            num_inference_steps_A,
+            num_inference_steps_B,
+            height_A,
+            height_B,
+            width_A,
+            width_B,
+            guidance_scale_A,
+            guidance_scale_B,
+            seed_A,
+            seed_B,
+            num_images_per_prompt_A,
+            num_images_per_prompt_B,
             model_choice_A,
             model_choice_B,
             use_same_settings,
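The inputs list in the last hunk sits inside an event binding whose trigger component and outputs fall outside the diff. A plausible shape of the full call is sketched below; run_button, gallery_A, and gallery_B are placeholder names assumed for illustration.

# Sketch of the surrounding event wiring; component names are placeholders.
run_button.click(
    fn=generate_arena_images,
    inputs=[
        prompt,
        negative_prompt_A, negative_prompt_B,
        num_inference_steps_A, num_inference_steps_B,
        height_A, height_B,
        width_A, width_B,
        guidance_scale_A, guidance_scale_B,
        seed_A, seed_B,
        num_images_per_prompt_A, num_images_per_prompt_B,
        model_choice_A, model_choice_B,
        use_same_settings,
    ],
    outputs=[gallery_A, gallery_B],
)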