prithivMLmods committed on
Commit 4211e4a · verified · 1 Parent(s): 014b60c

Update app.py

Files changed (1)
  1. app.py +108 -221
app.py CHANGED
@@ -1,13 +1,3 @@
- #!/usr/bin/env python
- #patch 3.0 ()
- # Permission is hereby granted, free of charge, to any person obtaining a copy
- # of this software and associated documentation files (the "Software"), to deal
- # in the Software without restriction, including without limitation the rights
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- # copies of the Software, and to permit persons to whom the Software is
- # furnished to do so, subject to the following conditions:
- #
- # ...
  import os
  import random
  import uuid
@@ -20,7 +10,6 @@ import torch
  from diffusers import DiffusionPipeline, StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
  from typing import Tuple

- #BaseConditions--
  bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
  bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
  default_negative = os.getenv("default_negative","")
@@ -257,22 +246,6 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
  negative = ""
  return p.replace("{prompt}", positive), n + negative

-
-
- DESCRIPTION = """## IMAGINEO 4K 🏞️
-
-
- """
-
- DESCRIPTIONy = """
- <p align="left">
- <a title="Github" href="https://github.com/PRITHIVSAKTHIUR/Imagineo-4K" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
- <img src="https://img.shields.io/github/stars/PRITHIVSAKTHIUR/Imagineo-4K?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars">
- </a>
- </p>
- """
-
-
  if not torch.cuda.is_available():
  DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"

@@ -311,37 +284,6 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
  seed = random.randint(0, MAX_SEED)
  return seed

- #Load the HTML content
- #html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
- #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:180px; border:none;"></iframe>'
- #html_file_url = "https://prithivmlmods-static-loading-theme.static.hf.space/index.html"
- #html_file_url = ""
- #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:400px; border:none"></iframe>'
-
-
- #js_func = """
-
- #<script>
- #(function() {
- # const url = new URL(window.location);
- # const currentTheme = url.searchParams.get('__theme');
-
- # if (currentTheme !== 'dark') {
- # url.searchParams.set('__theme', 'dark');
- # history.pushState({}, '', url.href);
- # applyDarkTheme();
- # }
- #})();
-
-
- #function applyDarkTheme() {
- # // Example: Apply dark theme styles to body or specific elements
- # document.body.classList.add('dark-theme');
- # // Additional logic as needed
- #}
- #</script>
- #"""
-
  @spaces.GPU(enable_queue=True)
  def generate(
  prompt: str,
@@ -413,34 +355,6 @@ def generate(
  save_image(grid_img, unique_name)
  return [unique_name], seed

- def load_predefined_images1():
- predefined_images1 = [
- "Tones/1.png",
- "Tones/2.png",
- "Tones/3.png",
- "Tones/4.png",
- "Tones/5.png",
- "Tones/6.png",
- "Tones/7.png",
- "Tones/8.png",
- "Tones/9.png",
- ]
- return predefined_images1
-
- def load_predefined_images():
- predefined_images = [
- "assets/11.png",
- "assets/22.png",
- "assets/33.png",
- "assets/44.png",
- "assets/55.png",
- "assets/66.png",
- "assets/77.png",
- "assets/88.png",
- "assets/99.png",
- ]
- return predefined_images
-
  examples = [

  "Chocolate dripping from a donut against a yellow background, in the style of brocore, hyper-realistic oil --ar 2:3 --q 2 --s 750 --v 5 --ar 2:3 --q 2 --s 750 --v 5",
@@ -456,8 +370,8 @@ h1{text-align:center}
  '''

  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
- gr.Markdown(DESCRIPTION)
  with gr.Row():
+ with gr.Column(scale=1):
  prompt = gr.Text(
  label="Prompt",
  show_label=False,
@@ -465,121 +379,122 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
  placeholder="Enter your prompt",
  container=False,
  )
- run_button = gr.Button("Run", scale=0)
- result = gr.Gallery(label="Result", columns=1, show_label=False)
-
- with gr.Row(visible=True):
- grid_size_selection = gr.Dropdown(
- choices=["2x1", "1x2", "2x2", "2x3", "3x2", "1x1"],
- value="1x1",
- label="Grid Size"
- )
-
- with gr.Row(visible=True):
- filter_selection = gr.Dropdown(
- show_label=True,
- container=True,
- interactive=True,
- choices=FILTER_NAMES,
- value=DEFAULT_FILTER_NAME,
- label="Filter Type",
- )
-
- with gr.Row(visible=True):
- collage_style_selection = gr.Dropdown(
- show_label=True,
- container=True,
- interactive=True,
- choices=COLLAGE_STYLE_NAMES,
- value=DEFAULT_COLLAGE_STYLE_NAME,
- label="Collage Template + Duotone Canvas",
- )
-
- with gr.Row(visible=True):
- style_selection = gr.Dropdown(
- show_label=True,
- container=True,
- interactive=True,
- choices=STYLE_NAMES,
- value=DEFAULT_STYLE_NAME,
- label="Quality Style",
- )
-
- with gr.Accordion("Advanced options", open=False):
- use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True, visible=True)
- negative_prompt = gr.Text(
- label="Negative prompt",
- max_lines=1,
- placeholder="Enter a negative prompt",
- value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
- visible=True,
- )
- with gr.Row():
- num_inference_steps = gr.Slider(
- label="Steps",
- minimum=10,
- maximum=60,
- step=1,
- value=30,
- )
- with gr.Row():
- num_images_per_prompt = gr.Slider(
- label="Images",
- minimum=1,
- maximum=5,
- step=1,
- value=2,
- )
- seed = gr.Slider(
- label="Seed",
- minimum=0,
- maximum=MAX_SEED,
- step=1,
- value=0,
- visible=True
- )
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
- with gr.Row(visible=True):
- width = gr.Slider(
- label="Width",
- minimum=512,
- maximum=2048,
- step=64,
- value=1024,
- )
- height = gr.Slider(
- label="Height",
- minimum=512,
- maximum=2048,
- step=64,
- value=1024,
- )
-
- with gr.Row():
- guidance_scale = gr.Slider(
- label="Guidance Scale",
- minimum=0.1,
- maximum=20.0,
- step=0.1,
- value=6,
- )
-
- gr.Examples(
- examples=examples,
- inputs=prompt,
- outputs=[result, seed],
- fn=generate,
- #cache_examples=True
- cache_examples=CACHE_EXAMPLES,
- )
-
- use_negative_prompt.change(
- fn=lambda x: gr.update(visible=x),
- inputs=use_negative_prompt,
- outputs=negative_prompt,
- api_name=False,
- )
+ run_button = gr.Button("Generate as ( 1024 x 1024 )", scale=0)
+
+ with gr.Row(visible=True):
+ grid_size_selection = gr.Dropdown(
+ choices=["2x1", "1x2", "2x2", "2x3", "3x2", "1x1"],
+ value="1x1",
+ label="Grid Size"
+ )
+
+ with gr.Row(visible=True):
+ filter_selection = gr.Dropdown(
+ show_label=True,
+ container=True,
+ interactive=True,
+ choices=FILTER_NAMES,
+ value=DEFAULT_FILTER_NAME,
+ label="Filter Type",
+ )
+
+ with gr.Row(visible=True):
+ collage_style_selection = gr.Dropdown(
+ show_label=True,
+ container=True,
+ interactive=True,
+ choices=COLLAGE_STYLE_NAMES,
+ value=DEFAULT_COLLAGE_STYLE_NAME,
+ label="Collage Template + Duotone Canvas",
+ )
+
+ with gr.Row(visible=True):
+ style_selection = gr.Dropdown(
+ show_label=True,
+ container=True,
+ interactive=True,
+ choices=STYLE_NAMES,
+ value=DEFAULT_STYLE_NAME,
+ label="Quality Style",
+ )
+
+ with gr.Accordion("Advanced options", open=False):
+ use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True, visible=True)
+ negative_prompt = gr.Text(
+ label="Negative prompt",
+ max_lines=1,
+ placeholder="Enter a negative prompt",
+ value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+ visible=True,
+ )
+ with gr.Row():
+ num_inference_steps = gr.Slider(
+ label="Steps",
+ minimum=10,
+ maximum=60,
+ step=1,
+ value=30,
+ )
+ with gr.Row():
+ num_images_per_prompt = gr.Slider(
+ label="Images",
+ minimum=1,
+ maximum=5,
+ step=1,
+ value=2,
+ )
+ seed = gr.Slider(
+ label="Seed",
+ minimum=0,
+ maximum=MAX_SEED,
+ step=1,
+ value=0,
+ visible=True
+ )
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+ with gr.Row(visible=True):
+ width = gr.Slider(
+ label="Width",
+ minimum=512,
+ maximum=2048,
+ step=64,
+ value=1024,
+ )
+ height = gr.Slider(
+ label="Height",
+ minimum=512,
+ maximum=2048,
+ step=64,
+ value=1024,
+ )
+
+ with gr.Row():
+ guidance_scale = gr.Slider(
+ label="Guidance Scale",
+ minimum=0.1,
+ maximum=20.0,
+ step=0.1,
+ value=6,
+ )
+
+ with gr.Column(scale=2):
+ result = gr.Gallery(label="Result", columns=1, show_label=False)
+
+ gr.Examples(
+ examples=examples,
+ inputs=prompt,
+ outputs=[result, seed],
+ fn=generate,
+ cache_examples=CACHE_EXAMPLES,
+ )
+
+ use_negative_prompt.change(
+ fn=lambda x: gr.update(visible=x),
+ inputs=use_negative_prompt,
+ outputs=negative_prompt,
+ api_name=False,
+ )

  gr.on(
  triggers=[
@@ -606,33 +521,5 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
  api_name="run",
  )

- gr.Markdown("### Duotone Canvas")
- predefined_gallery = gr.Gallery(label="Duotone Canvas", columns=3, show_label=False, value=load_predefined_images1())
-
- gr.Markdown("### Image Gallery")
- predefined_gallery = gr.Gallery(label="Image Gallery", columns=3, show_label=False, value=load_predefined_images())
-
- gr.Markdown(DESCRIPTIONy)
- gr.Markdown("**Disclaimer/Note:**")
- #gr.Markdown("🏞️This is the demo space for generating images using Stable Diffusion with grids, filters, templates, quality styles, and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images.<a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.")
- #gr.Markdown("🏞️This repository helps you run and work with Hugging Face spaces on your local CPU or using Colab Notebooks. If you find it helpful, give it a like or starring the repository.<a href='https://github.com/PRITHIVSAKTHIUR/How-to-run-huggingface-spaces-on-local-machine-demo' target='_blank'>Visit repo.</a>.")
- #gr.Markdown("⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
- #gr.HTML(html_content)
-
- gr.Markdown("""
- <div style='text-align: justify;'>
- 🐡This is the demo space for generating images using Stable Diffusion with grids, filters, templates, quality styles, and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images. <a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.
- </div>""")
-
- gr.Markdown("""
- <div style='text-align: justify;'>
- 🐡This repository helps you run and work with Hugging Face spaces on your local CPU or using Colab Notebooks. If you find it helpful, give it a like or star the repository. <a href='https://github.com/PRITHIVSAKTHIUR/How-to-run-huggingface-spaces-on-local-machine-demo' target='_blank'>Visit repo.</a>.
- </div>""")
-
- gr.Markdown("""
- <div style='text-align: justify;'>
- ⚠️ Users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.
- </div>""")
-
  if __name__ == "__main__":
  demo.queue(max_size=40).launch()
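
The substance of this commit is a layout change: the old flat stack of controls under gr.Blocks is replaced by a two-column arrangement, with the prompt, dropdowns, and advanced-option sliders grouped in a gr.Column(scale=1) and the result gallery moved to a wider gr.Column(scale=2); the "Run" button is relabelled "Generate as ( 1024 x 1024 )", and the predefined-image galleries and footer markdown are dropped. A minimal, self-contained sketch of that column pattern follows; dummy_generate and the reduced set of controls are illustrative stand-ins, not the Space's actual generate() pipeline.

import gradio as gr
from PIL import Image

def dummy_generate(prompt: str, steps: int):
    # Placeholder for the Space's real generate(): returns a flat-colour image
    # so the layout can be exercised without loading SDXL.
    return [Image.new("RGB", (512, 512), "steelblue")]

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):  # prompt and controls, as in the updated app.py
            prompt = gr.Text(label="Prompt", placeholder="Enter your prompt")
            run_button = gr.Button("Generate as ( 1024 x 1024 )")
            with gr.Accordion("Advanced options", open=False):
                steps = gr.Slider(label="Steps", minimum=10, maximum=60, step=1, value=30)
        with gr.Column(scale=2):  # wider column for the result gallery
            result = gr.Gallery(label="Result", columns=1, show_label=False)

    run_button.click(fn=dummy_generate, inputs=[prompt, steps], outputs=result)

if __name__ == "__main__":
    demo.launch()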
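One part of app.py the commit leaves untouched is the configuration block at the top, which pulls the prompt filters and default negative prompt from JSON-encoded environment variables. The sketch below shows how those variables might be supplied when running the file outside the Space; only the variable names (BAD_WORDS, BAD_WORDS_NEGATIVE, default_negative) come from the diff, and the example values are placeholders.

import json
import os

# Illustrative values only; in the Space these would be set as repository
# secrets/variables rather than hard-coded.
os.environ.setdefault("BAD_WORDS", '["blocked term one", "blocked term two"]')
os.environ.setdefault("BAD_WORDS_NEGATIVE", '["unwanted negative term"]')
os.environ.setdefault("default_negative", "lowres, watermark, text")

# Same parsing logic as the unchanged context lines in the diff above.
bad_words = json.loads(os.getenv("BAD_WORDS", "[]"))
bad_words_negative = json.loads(os.getenv("BAD_WORDS_NEGATIVE", "[]"))
default_negative = os.getenv("default_negative", "")

print(bad_words, bad_words_negative, default_negative)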