Nick088 committed
Commit 9c246a3 (verified) · 1 Parent(s): 7062b75

Made the "use same settings" checkbox UI prettier

Files changed (1): app.py (+110 −49)
app.py CHANGED

@@ -84,16 +84,11 @@ def generate_arena_images(
 @spaces.GPU(duration=80)
 def generate_arena_images(
     prompt,
-    negative_prompt_A,
-    negative_prompt_B,
-    num_inference_steps_A,
-    num_inference_steps_B,
-    height_A,
-    height_B,
-    width_A,
-    width_B,
-    guidance_scale_A,
-    guidance_scale_B,
+    negative_prompt,
+    num_inference_steps,
+    height,
+    width,
+    guidance_scale,
     seed,
     num_images_per_prompt,
     model_choice_A,
@@ -106,22 +101,14 @@ def generate_arena_images(
 
     generator = torch.Generator().manual_seed(seed)
 
-    # Apply settings based on use_same_settings
-    if use_same_settings:
-        num_inference_steps_B = num_inference_steps_A
-        height_B = height_A
-        width_B = width_A
-        guidance_scale_B = guidance_scale_A
-        negative_prompt_B = negative_prompt_A
-
     # Generate images for both models
     images_A = generate_single_image(
         prompt,
-        negative_prompt_A,
-        num_inference_steps_A,
-        height_A,
-        width_A,
-        guidance_scale_A,
+        negative_prompt,
+        num_inference_steps,
+        height,
+        width,
+        guidance_scale,
         seed,
         num_images_per_prompt,
         model_choice_A,
@@ -129,11 +116,11 @@ def generate_arena_images(
     )
     images_B = generate_single_image(
         prompt,
-        negative_prompt_B,
-        num_inference_steps_B,
-        height_B,
-        width_B,
-        guidance_scale_B,
+        negative_prompt,
+        num_inference_steps,
+        height,
+        width,
+        guidance_scale,
         seed,
         num_images_per_prompt,
         model_choice_B,
@@ -228,7 +215,69 @@ with gr.Blocks(css=css) as demo:
         result_B = gr.Gallery(label="Generated Images (Model B)", elem_id="gallery_B")
     with gr.Accordion("Advanced options", open=False):
         use_same_settings = gr.Checkbox(label='Use same settings for both models', value=True)
-        with gr.Row():
+
+        # Conditional UI elements based on use_same_settings
+        with gr.Row(visible=True):
+            negative_prompt = gr.Textbox(
+                label="Negative Prompt",
+                info="Describe what you don't want in the image",
+                value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                placeholder="Ugly, bad anatomy...",
+            )
+        with gr.Row(visible=True):
+            num_inference_steps = gr.Slider(
+                label="Number of Inference Steps",
+                info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
+                minimum=1,
+                maximum=50,
+                value=25,
+                step=1,
+            )
+            guidance_scale = gr.Slider(
+                label="Guidance Scale",
+                info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
+                minimum=0.0,
+                maximum=10.0,
+                value=7.5,
+                step=0.1,
+            )
+        with gr.Row(visible=True):
+            width = gr.Slider(
+                label="Width",
+                info="Width of the Image",
+                minimum=256,
+                maximum=1344,
+                step=32,
+                value=1024,
+            )
+            height = gr.Slider(
+                label="Height",
+                info="Height of the Image",
+                minimum=256,
+                maximum=1344,
+                step=32,
+                value=1024,
+            )
+        with gr.Row(visible=True):
+            seed = gr.Slider(
+                value=42,
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                label="Seed",
+                info="A starting point to initiate the generation process, put 0 for a random one",
+            )
+            num_images_per_prompt = gr.Slider(
+                label="Images Per Prompt",
+                info="Number of Images to generate with the settings",
+                minimum=1,
+                maximum=4,
+                step=1,
+                value=2,
+            )
+
+        # Conditional UI elements based on use_same_settings
+        with gr.Row(visible=False):
             negative_prompt_A = gr.Textbox(
                 label="Negative Prompt (Model A)",
                 info="Describe what you don't want in the image",
@@ -241,7 +290,7 @@ with gr.Blocks(css=css) as demo:
                 value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
                 placeholder="Ugly, bad anatomy...",
             )
-        with gr.Row():
+        with gr.Row(visible=False):
             num_inference_steps_A = gr.Slider(
                 label="Number of Inference Steps (Model A)",
                 info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
@@ -258,7 +307,7 @@ with gr.Blocks(css=css) as demo:
                 value=25,
                 step=1,
             )
-        with gr.Row():
+        with gr.Row(visible=False):
             width_A = gr.Slider(
                 label="Width (Model A)",
                 info="Width of the Image",
@@ -275,7 +324,7 @@ with gr.Blocks(css=css) as demo:
                 step=32,
                 value=1024,
             )
-        with gr.Row():
+        with gr.Row(visible=False):
             height_A = gr.Slider(
                 label="Height (Model A)",
                 info="Height of the Image",
@@ -292,7 +341,7 @@ with gr.Blocks(css=css) as demo:
                 step=32,
                 value=1024,
             )
-        with gr.Row():
+        with gr.Row(visible=False):
             guidance_scale_A = gr.Slider(
                 label="Guidance Scale (Model A)",
                 info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
@@ -309,17 +358,34 @@ with gr.Blocks(css=css) as demo:
                 value=7.5,
                 step=0.1,
             )
-        with gr.Row():
-            seed = gr.Slider(
+        with gr.Row(visible=False):
+            seed_A = gr.Slider(
                 value=42,
                 minimum=0,
                 maximum=MAX_SEED,
                 step=1,
-                label="Seed",
+                label="Seed (Model A)",
                 info="A starting point to initiate the generation process, put 0 for a random one",
             )
-            num_images_per_prompt = gr.Slider(
-                label="Images Per Prompt",
+            seed_B = gr.Slider(
+                value=42,
+                minimum=0,
+                maximum=MAX_SEED,
+                step=1,
+                label="Seed (Model B)",
+                info="A starting point to initiate the generation process, put 0 for a random one",
+            )
+        with gr.Row(visible=False):
+            num_images_per_prompt_A = gr.Slider(
+                label="Images Per Prompt (Model A)",
+                info="Number of Images to generate with the settings",
+                minimum=1,
+                maximum=4,
+                step=1,
+                value=2,
+            )
+            num_images_per_prompt_B = gr.Slider(
+                label="Images Per Prompt (Model B)",
                 info="Number of Images to generate with the settings",
                 minimum=1,
                 maximum=4,
@@ -342,21 +408,16 @@ with gr.Blocks(css=css) as demo:
         fn=generate_arena_images,
         inputs=[
             prompt,
-            negative_prompt_A,
-            negative_prompt_B,
-            num_inference_steps_A,
-            num_inference_steps_B,
-            height_A,
-            height_B,
-            width_A,
-            width_B,
-            guidance_scale_A,
-            guidance_scale_B,
+            negative_prompt,
+            num_inference_steps,
+            height,
+            width,
+            guidance_scale,
             seed,
             num_images_per_prompt,
             model_choice_A,
             model_choice_B,
-            use_same_settings
+            use_same_settings,
         ],
         outputs=[result_A, result_B],
     )
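
Note: the hunks above only set the initial visibility of the rows (shared settings visible, per-model "(Model A)"/"(Model B)" settings hidden); the wiring that flips them when the checkbox is toggled is not shown in this diff. Below is a minimal sketch of how such a toggle could look in Gradio, assuming a `.change` event on `use_same_settings` and `gr.update(visible=...)` outputs; the `toggle_settings` helper and the `shared_row` / `per_model_row` handles are hypothetical, only the checkbox label, widget labels, and visibility defaults come from the diff.

import gradio as gr

def toggle_settings(use_same):
    # First update targets the shared-settings row, the second the per-model row.
    return gr.update(visible=use_same), gr.update(visible=not use_same)

with gr.Blocks() as sketch:
    use_same_settings = gr.Checkbox(label="Use same settings for both models", value=True)
    # Shared controls, visible by default (mirrors the visible=True rows in the diff).
    with gr.Row(visible=True) as shared_row:
        negative_prompt = gr.Textbox(label="Negative Prompt")
    # Per-model controls, hidden by default (mirrors the visible=False rows in the diff).
    with gr.Row(visible=False) as per_model_row:
        negative_prompt_A = gr.Textbox(label="Negative Prompt (Model A)")
        negative_prompt_B = gr.Textbox(label="Negative Prompt (Model B)")
    use_same_settings.change(
        fn=toggle_settings,
        inputs=use_same_settings,
        outputs=[shared_row, per_model_row],
    )

With this kind of wiring, unticking the checkbox would hide the shared row and reveal the per-model rows, which matches the intent of the "use same settings" checkbox described in the commit title.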