Sergidev committed on
Commit
782f715
·
verified ·
1 Parent(s): 305b4b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -55
app.py CHANGED
@@ -27,6 +27,7 @@ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
27
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
28
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
29
  OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./outputs")
 
30
 
31
  MODEL = os.getenv(
32
  "MODEL",
@@ -37,7 +38,7 @@ DESCRIPTION = '''
37
  <div>
38
  <h1 style="text-align: center;">High Definition Pony Diffusion</h1>
39
  <p>Gradio demo for PonyDiffusion v6 with image gallery, json prompt support, advanced options and more.</p>
40
- <p>❤️ Thanks for ✨4000 visits! Heart this space if you like it!</p>
41
  <p>🔎 For more details about me, take a look at <a href="https://sergidev.me">My website</a>.</p>
42
  <p>🌚 For dark mode compatibility, click <a href="https://sergidev.me/hdiffusion">here</a>.</p>
43
  </div>
@@ -114,6 +115,7 @@ def generate(
114
  upscaler_strength: float = 0.55,
115
  upscale_by: float = 1.5,
116
  json_params: str = "",
 
117
  progress=gr.Progress(track_tqdm=True),
118
  ) -> Image:
119
  if json_params:
@@ -154,6 +156,7 @@ def generate(
154
  "num_inference_steps": num_inference_steps,
155
  "seed": seed,
156
  "sampler": sampler,
 
157
  }
158
 
159
  if use_upscaler:
@@ -170,46 +173,50 @@ def generate(
170
  logger.info(json.dumps(metadata, indent=4))
171
 
172
  try:
173
- if use_upscaler:
174
- latents = pipe(
175
- prompt=prompt,
176
- negative_prompt=negative_prompt,
177
- width=width,
178
- height=height,
179
- guidance_scale=guidance_scale,
180
- num_inference_steps=num_inference_steps,
181
- generator=generator,
182
- output_type="latent",
183
- ).images
184
- upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
185
- images = upscaler_pipe(
186
- prompt=prompt,
187
- negative_prompt=negative_prompt,
188
- image=upscaled_latents,
189
- guidance_scale=guidance_scale,
190
- num_inference_steps=num_inference_steps,
191
- strength=upscaler_strength,
192
- generator=generator,
193
- output_type="pil",
194
- ).images
195
- else:
196
- images = pipe(
197
- prompt=prompt,
198
- negative_prompt=negative_prompt,
199
- width=width,
200
- height=height,
201
- guidance_scale=guidance_scale,
202
- num_inference_steps=num_inference_steps,
203
- generator=generator,
204
- output_type="pil",
205
- ).images
206
-
207
- if images and IS_COLAB:
208
- for image in images:
 
 
 
 
209
  filepath = utils.save_image(image, metadata, OUTPUT_DIR)
210
  logger.info(f"Image saved as {filepath} with metadata")
211
 
212
- return images, metadata
213
  except Exception as e:
214
  logger.exception(f"An error occurred: {e}")
215
  raise
@@ -229,16 +236,18 @@ def handle_image_click(evt: gr.SelectData):
229
  return selected["image"], json.dumps(selected["metadata"], indent=2)
230
 
231
  def generate_and_update_history(*args, **kwargs):
 
232
  images, metadata = generate(*args, **kwargs)
233
  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
234
- generation_history.insert(0, {
235
- "prompt": metadata["prompt"],
236
- "timestamp": timestamp,
237
- "image": images[0],
238
- "metadata": metadata
239
- })
 
240
  if len(generation_history) > 20:
241
- generation_history.pop()
242
  return images[0], json.dumps(metadata, indent=2), update_history_list()
243
 
244
  with open('characterfull.txt', 'r') as f:
@@ -262,6 +271,12 @@ if torch.cuda.is_available():
262
  else:
263
  pipe = None
264
 
 
 
 
 
 
 
265
  with gr.Blocks(css="style.css") as demo:
266
  gr.Markdown(DESCRIPTION)
267
 
@@ -365,6 +380,13 @@ with gr.Blocks(css="style.css") as demo:
365
  step=1,
366
  value=28,
367
  )
 
 
 
 
 
 
 
368
 
369
  with gr.Accordion(label="Generation Parameters", open=False):
370
  gr_metadata = gr.JSON(label="Metadata", show_label=False)
@@ -372,17 +394,25 @@ with gr.Blocks(css="style.css") as demo:
372
  generate_from_json = gr.Button("Generate from JSON")
373
 
374
  with gr.Accordion("Generation History", open=False) as history_accordion:
375
- history_gallery = gr.Gallery(
376
- label="History",
377
- show_label=False,
378
- elem_id="history_gallery",
379
- columns=5,
380
- rows=2,
381
- height="auto"
382
  )
383
- with gr.Row():
384
- selected_image = gr.Image(label="Selected Image", interactive=False)
385
- selected_metadata = gr.JSON(label="Selected Metadata", show_label=False)
 
 
 
 
 
 
 
 
 
 
 
386
 
387
  gr.Examples(
388
  examples=config.examples,
@@ -422,6 +452,7 @@ with gr.Blocks(css="style.css") as demo:
422
  upscaler_strength,
423
  upscale_by,
424
  json_input,
 
425
  ]
426
 
427
  prompt.submit(
@@ -483,5 +514,11 @@ with gr.Blocks(css="style.css") as demo:
483
  inputs=[],
484
  outputs=[selected_image, selected_metadata]
485
  )
 
 
 
 
 
 
486
 
487
  demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)
 
27
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
28
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
29
  OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./outputs")
30
+ HISTORY_SECRET = os.getenv("HISTORY_SECRET", "default_secret")
31
 
32
  MODEL = os.getenv(
33
  "MODEL",
 
38
  <div>
39
  <h1 style="text-align: center;">High Definition Pony Diffusion</h1>
40
  <p>Gradio demo for PonyDiffusion v6 with image gallery, json prompt support, advanced options and more.</p>
41
+ <p>❤️ Thanks for ✨5000 visits! Heart this space if you like it!</p>
42
  <p>🔎 For more details about me, take a look at <a href="https://sergidev.me">My website</a>.</p>
43
  <p>🌚 For dark mode compatibility, click <a href="https://sergidev.me/hdiffusion">here</a>.</p>
44
  </div>
 
115
  upscaler_strength: float = 0.55,
116
  upscale_by: float = 1.5,
117
  json_params: str = "",
118
+ batch_size: int = 1,
119
  progress=gr.Progress(track_tqdm=True),
120
  ) -> Image:
121
  if json_params:
 
156
  "num_inference_steps": num_inference_steps,
157
  "seed": seed,
158
  "sampler": sampler,
159
+ "batch_size": batch_size,
160
  }
161
 
162
  if use_upscaler:
 
173
  logger.info(json.dumps(metadata, indent=4))
174
 
175
  try:
176
+ all_images = []
177
+ for _ in range(batch_size):
178
+ batch_generator = utils.seed_everything(random.randint(0, utils.MAX_SEED))
179
+ if use_upscaler:
180
+ latents = pipe(
181
+ prompt=prompt,
182
+ negative_prompt=negative_prompt,
183
+ width=width,
184
+ height=height,
185
+ guidance_scale=guidance_scale,
186
+ num_inference_steps=num_inference_steps,
187
+ generator=batch_generator,
188
+ output_type="latent",
189
+ ).images
190
+ upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
191
+ images = upscaler_pipe(
192
+ prompt=prompt,
193
+ negative_prompt=negative_prompt,
194
+ image=upscaled_latents,
195
+ guidance_scale=guidance_scale,
196
+ num_inference_steps=num_inference_steps,
197
+ strength=upscaler_strength,
198
+ generator=batch_generator,
199
+ output_type="pil",
200
+ ).images
201
+ else:
202
+ images = pipe(
203
+ prompt=prompt,
204
+ negative_prompt=negative_prompt,
205
+ width=width,
206
+ height=height,
207
+ guidance_scale=guidance_scale,
208
+ num_inference_steps=num_inference_steps,
209
+ generator=batch_generator,
210
+ output_type="pil",
211
+ ).images
212
+ all_images.extend(images)
213
+
214
+ if all_images and IS_COLAB:
215
+ for image in all_images:
216
  filepath = utils.save_image(image, metadata, OUTPUT_DIR)
217
  logger.info(f"Image saved as {filepath} with metadata")
218
 
219
+ return all_images, metadata
220
  except Exception as e:
221
  logger.exception(f"An error occurred: {e}")
222
  raise
 
236
  return selected["image"], json.dumps(selected["metadata"], indent=2)
237
 
238
  def generate_and_update_history(*args, **kwargs):
239
+ global generation_history
240
  images, metadata = generate(*args, **kwargs)
241
  timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
242
+ for image in images:
243
+ generation_history.insert(0, {
244
+ "prompt": metadata["prompt"],
245
+ "timestamp": timestamp,
246
+ "image": image,
247
+ "metadata": metadata
248
+ })
249
  if len(generation_history) > 20:
250
+ generation_history = generation_history[:20]
251
  return images[0], json.dumps(metadata, indent=2), update_history_list()
252
 
253
  with open('characterfull.txt', 'r') as f:
 
271
  else:
272
  pipe = None
273
 
274
+ def check_history_password(password):
275
+ if password == HISTORY_SECRET:
276
+ return gr.update(visible=True)
277
+ else:
278
+ return gr.update(visible=False)
279
+
280
  with gr.Blocks(css="style.css") as demo:
281
  gr.Markdown(DESCRIPTION)
282
 
 
380
  step=1,
381
  value=28,
382
  )
383
+ batch_size = gr.Slider(
384
+ label="Batch Size",
385
+ minimum=1,
386
+ maximum=4,
387
+ step=1,
388
+ value=1,
389
+ )
390
 
391
  with gr.Accordion(label="Generation Parameters", open=False):
392
  gr_metadata = gr.JSON(label="Metadata", show_label=False)
 
394
  generate_from_json = gr.Button("Generate from JSON")
395
 
396
  with gr.Accordion("Generation History", open=False) as history_accordion:
397
+ history_password = gr.Textbox(
398
+ label="Global generation history",
399
+ type="password",
400
+ placeholder="Enter secret for generation history"
 
 
 
401
  )
402
+ history_submit = gr.Button("Submit")
403
+
404
+ with gr.Group(visible=False) as history_content:
405
+ history_gallery = gr.Gallery(
406
+ label="History",
407
+ show_label=False,
408
+ elem_id="history_gallery",
409
+ columns=5,
410
+ rows=2,
411
+ height="auto"
412
+ )
413
+ with gr.Row():
414
+ selected_image = gr.Image(label="Selected Image", interactive=False)
415
+ selected_metadata = gr.JSON(label="Selected Metadata", show_label=False)
416
 
417
  gr.Examples(
418
  examples=config.examples,
 
452
  upscaler_strength,
453
  upscale_by,
454
  json_input,
455
+ batch_size,
456
  ]
457
 
458
  prompt.submit(
 
514
  inputs=[],
515
  outputs=[selected_image, selected_metadata]
516
  )
517
+
518
+ history_submit.click(
519
+ fn=check_history_password,
520
+ inputs=[history_password],
521
+ outputs=[history_content],
522
+ )
523
 
524
  demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)