Nick088 commited on
Commit
7062b75
·
verified ·
1 Parent(s): 0e7c9e6

Arena tab: let the user choose whether to use the same settings for both models

Browse files
Files changed (1) hide show
  1. app.py +113 -52
app.py CHANGED
@@ -84,15 +84,21 @@ def generate_single_image(
84
  @spaces.GPU(duration=80)
85
  def generate_arena_images(
86
  prompt,
87
- negative_prompt,
88
- num_inference_steps,
89
- height,
90
- width,
91
- guidance_scale,
 
 
 
 
 
92
  seed,
93
  num_images_per_prompt,
94
- model_choice_1,
95
- model_choice_2,
 
96
  progress=gr.Progress(track_tqdm=True),
97
  ):
98
  if seed == 0:
@@ -100,33 +106,41 @@ def generate_arena_images(
100
 
101
  generator = torch.Generator().manual_seed(seed)
102
 
 
 
 
 
 
 
 
 
103
  # Generate images for both models
104
- images_1 = generate_single_image(
105
  prompt,
106
- negative_prompt,
107
- num_inference_steps,
108
- height,
109
- width,
110
- guidance_scale,
111
  seed,
112
  num_images_per_prompt,
113
- model_choice_1,
114
  generator,
115
  )
116
- images_2 = generate_single_image(
117
  prompt,
118
- negative_prompt,
119
- num_inference_steps,
120
- height,
121
- width,
122
- guidance_scale,
123
  seed,
124
  num_images_per_prompt,
125
- model_choice_2,
126
  generator,
127
  )
128
 
129
- return images_1, images_2
130
 
131
  # Define the image generation function for the Individual tab
132
  @spaces.GPU(duration=80)
@@ -199,61 +213,102 @@ with gr.Blocks(css=css) as demo:
199
  info="Describe the image you want",
200
  placeholder="A cat...",
201
  )
202
- model_choice_1 = gr.Dropdown(
203
- label="Stable Diffusion Model 1",
204
  choices=["sd3 medium", "sd2.1", "sdxl", "sdxl flash"],
205
  value="sd3 medium",
206
  )
207
- model_choice_2 = gr.Dropdown(
208
- label="Stable Diffusion Model 2",
209
  choices=["sd3 medium", "sd2.1", "sdxl", "sdxl flash"],
210
  value="sdxl",
211
  )
212
  run_button = gr.Button("Run")
213
- result_1 = gr.Gallery(label="Generated Images (Model 1)", elem_id="gallery_1")
214
- result_2 = gr.Gallery(label="Generated Images (Model 2)", elem_id="gallery_2")
215
  with gr.Accordion("Advanced options", open=False):
 
216
  with gr.Row():
217
- negative_prompt = gr.Textbox(
218
- label="Negative Prompt",
 
 
 
 
 
 
219
  info="Describe what you don't want in the image",
220
  value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
221
  placeholder="Ugly, bad anatomy...",
222
  )
223
  with gr.Row():
224
- num_inference_steps = gr.Slider(
225
- label="Number of Inference Steps",
226
  info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
227
  minimum=1,
228
  maximum=50,
229
  value=25,
230
  step=1,
231
  )
232
- guidance_scale = gr.Slider(
233
- label="Guidance Scale",
234
- info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
235
- minimum=0.0,
236
- maximum=10.0,
237
- value=7.5,
238
- step=0.1,
239
  )
240
  with gr.Row():
241
- width = gr.Slider(
242
- label="Width",
243
  info="Width of the Image",
244
  minimum=256,
245
  maximum=1344,
246
  step=32,
247
  value=1024,
248
  )
249
- height = gr.Slider(
250
- label="Height",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  info="Height of the Image",
252
  minimum=256,
253
  maximum=1344,
254
  step=32,
255
  value=1024,
256
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
257
  with gr.Row():
258
  seed = gr.Slider(
259
  value=42,
@@ -275,7 +330,7 @@ with gr.Blocks(css=css) as demo:
275
  gr.Examples(
276
  examples=examples,
277
  inputs=[prompt],
278
- outputs=[result_1, result_2],
279
  fn=generate_arena_images,
280
  )
281
 
@@ -287,17 +342,23 @@ with gr.Blocks(css=css) as demo:
287
  fn=generate_arena_images,
288
  inputs=[
289
  prompt,
290
- negative_prompt,
291
- num_inference_steps,
292
- width,
293
- height,
294
- guidance_scale,
 
 
 
 
 
295
  seed,
296
  num_images_per_prompt,
297
- model_choice_1,
298
- model_choice_2,
 
299
  ],
300
- outputs=[result_1, result_2],
301
  )
302
 
303
  with gr.TabItem("Individual"):
 
84
  @spaces.GPU(duration=80)
85
  def generate_arena_images(
86
  prompt,
87
+ negative_prompt_A,
88
+ negative_prompt_B,
89
+ num_inference_steps_A,
90
+ num_inference_steps_B,
91
+ height_A,
92
+ height_B,
93
+ width_A,
94
+ width_B,
95
+ guidance_scale_A,
96
+ guidance_scale_B,
97
  seed,
98
  num_images_per_prompt,
99
+ model_choice_A,
100
+ model_choice_B,
101
+ use_same_settings,
102
  progress=gr.Progress(track_tqdm=True),
103
  ):
104
  if seed == 0:
 
106
 
107
  generator = torch.Generator().manual_seed(seed)
108
 
109
+ # Apply settings based on use_same_settings
110
+ if use_same_settings:
111
+ num_inference_steps_B = num_inference_steps_A
112
+ height_B = height_A
113
+ width_B = width_A
114
+ guidance_scale_B = guidance_scale_A
115
+ negative_prompt_B = negative_prompt_A
116
+
117
  # Generate images for both models
118
+ images_A = generate_single_image(
119
  prompt,
120
+ negative_prompt_A,
121
+ num_inference_steps_A,
122
+ height_A,
123
+ width_A,
124
+ guidance_scale_A,
125
  seed,
126
  num_images_per_prompt,
127
+ model_choice_A,
128
  generator,
129
  )
130
+ images_B = generate_single_image(
131
  prompt,
132
+ negative_prompt_B,
133
+ num_inference_steps_B,
134
+ height_B,
135
+ width_B,
136
+ guidance_scale_B,
137
  seed,
138
  num_images_per_prompt,
139
+ model_choice_B,
140
  generator,
141
  )
142
 
143
+ return images_A, images_B
144
 
145
  # Define the image generation function for the Individual tab
146
  @spaces.GPU(duration=80)
 
213
  info="Describe the image you want",
214
  placeholder="A cat...",
215
  )
216
+ model_choice_A = gr.Dropdown(
217
+ label="Stable Diffusion Model A",
218
  choices=["sd3 medium", "sd2.1", "sdxl", "sdxl flash"],
219
  value="sd3 medium",
220
  )
221
+ model_choice_B = gr.Dropdown(
222
+ label="Stable Diffusion Model B",
223
  choices=["sd3 medium", "sd2.1", "sdxl", "sdxl flash"],
224
  value="sdxl",
225
  )
226
  run_button = gr.Button("Run")
227
+ result_A = gr.Gallery(label="Generated Images (Model A)", elem_id="gallery_A")
228
+ result_B = gr.Gallery(label="Generated Images (Model B)", elem_id="gallery_B")
229
  with gr.Accordion("Advanced options", open=False):
230
+ use_same_settings = gr.Checkbox(label='Use same settings for both models', value=True)
231
  with gr.Row():
232
+ negative_prompt_A = gr.Textbox(
233
+ label="Negative Prompt (Model A)",
234
+ info="Describe what you don't want in the image",
235
+ value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
236
+ placeholder="Ugly, bad anatomy...",
237
+ )
238
+ negative_prompt_B = gr.Textbox(
239
+ label="Negative Prompt (Model B)",
240
  info="Describe what you don't want in the image",
241
  value="deformed, distorted, disfigured, poorly drawn, bad anatomy, incorrect anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
242
  placeholder="Ugly, bad anatomy...",
243
  )
244
  with gr.Row():
245
+ num_inference_steps_A = gr.Slider(
246
+ label="Number of Inference Steps (Model A)",
247
  info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
248
  minimum=1,
249
  maximum=50,
250
  value=25,
251
  step=1,
252
  )
253
+ num_inference_steps_B = gr.Slider(
254
+ label="Number of Inference Steps (Model B)",
255
+ info="The number of denoising steps of the image. More denoising steps usually lead to a higher quality image at the cost of slower inference",
256
+ minimum=1,
257
+ maximum=50,
258
+ value=25,
259
+ step=1,
260
  )
261
  with gr.Row():
262
+ width_A = gr.Slider(
263
+ label="Width (Model A)",
264
  info="Width of the Image",
265
  minimum=256,
266
  maximum=1344,
267
  step=32,
268
  value=1024,
269
  )
270
+ width_B = gr.Slider(
271
+ label="Width (Model B)",
272
+ info="Width of the Image",
273
+ minimum=256,
274
+ maximum=1344,
275
+ step=32,
276
+ value=1024,
277
+ )
278
+ with gr.Row():
279
+ height_A = gr.Slider(
280
+ label="Height (Model A)",
281
+ info="Height of the Image",
282
+ minimum=256,
283
+ maximum=1344,
284
+ step=32,
285
+ value=1024,
286
+ )
287
+ height_B = gr.Slider(
288
+ label="Height (Model B)",
289
  info="Height of the Image",
290
  minimum=256,
291
  maximum=1344,
292
  step=32,
293
  value=1024,
294
  )
295
+ with gr.Row():
296
+ guidance_scale_A = gr.Slider(
297
+ label="Guidance Scale (Model A)",
298
+ info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
299
+ minimum=0.0,
300
+ maximum=10.0,
301
+ value=7.5,
302
+ step=0.1,
303
+ )
304
+ guidance_scale_B = gr.Slider(
305
+ label="Guidance Scale (Model B)",
306
+ info="Controls how much the image generation process follows the text prompt. Higher values make the image stick more closely to the input text.",
307
+ minimum=0.0,
308
+ maximum=10.0,
309
+ value=7.5,
310
+ step=0.1,
311
+ )
312
  with gr.Row():
313
  seed = gr.Slider(
314
  value=42,
 
330
  gr.Examples(
331
  examples=examples,
332
  inputs=[prompt],
333
+ outputs=[result_A, result_B],
334
  fn=generate_arena_images,
335
  )
336
 
 
342
  fn=generate_arena_images,
343
  inputs=[
344
  prompt,
345
+ negative_prompt_A,
346
+ negative_prompt_B,
347
+ num_inference_steps_A,
348
+ num_inference_steps_B,
349
+ height_A,
350
+ height_B,
351
+ width_A,
352
+ width_B,
353
+ guidance_scale_A,
354
+ guidance_scale_B,
355
  seed,
356
  num_images_per_prompt,
357
+ model_choice_A,
358
+ model_choice_B,
359
+ use_same_settings
360
  ],
361
+ outputs=[result_A, result_B],
362
  )
363
 
364
  with gr.TabItem("Individual"):