1inkusFace committed on
Commit
c32268f
·
verified ·
1 Parent(s): 07691b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -20
app.py CHANGED
@@ -243,8 +243,8 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
243
  f.write(f"Model UNET: ford442/RealVisXL_V5.0_BF16 \n")
244
  upload_to_ftp(filename)
245
 
246
- def captioning(img):
247
- prompts_array = [
248
  "Adjectives describing this scene are:",
249
  # "The color scheme of this image is",
250
  # "This scene could be described in detail as",
@@ -255,6 +255,7 @@ def captioning(img):
255
  "The setting of this scene must be located",
256
  # Add more prompts here
257
  ]
 
258
  output_prompt=[]
259
  cap_prompt = (
260
  "Describe this image with a caption to be used for image generation."
@@ -442,7 +443,8 @@ def generate_30(
442
  latent_file_3_scale: float = 1.0,
443
  latent_file_4_scale: float = 1.0,
444
  latent_file_5_scale: float = 1.0,
445
- samples=1,
 
446
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
447
  ):
448
  prompt1=None
@@ -460,14 +462,14 @@ def generate_30(
460
  #sd_image_a.resize((height,width), Image.LANCZOS)
461
  caption=[]
462
  #caption.append(captioning(sd_image_a))
463
- prompt1, cap=captioning(sd_image_a)
464
  capt.append(cap)
465
  if latent_file_2 is not None: # Check if a latent file is provided
466
  sd_image_b = Image.open(latent_file_2.name).convert('RGB')
467
  #sd_image_b.resize((height,width), Image.LANCZOS)
468
  sd_image_b.resize((768,768), Image.LANCZOS)
469
  #caption.append(captioning(sd_image_b))
470
- prompt2, cap=captioning(sd_image_b)
471
  capt.append(cap)
472
  else:
473
  sd_image_b = None
@@ -476,7 +478,7 @@ def generate_30(
476
  #sd_image_c.resize((height,width), Image.LANCZOS)
477
  sd_image_c.resize((768,768), Image.LANCZOS)
478
  #caption.append(captioning(sd_image_c))
479
- prompt3, cap=captioning(sd_image_c)
480
  capt.append(cap)
481
  else:
482
  sd_image_c = None
@@ -485,7 +487,7 @@ def generate_30(
485
  #sd_image_d.resize((height,width), Image.LANCZOS)
486
  sd_image_d.resize((768,768), Image.LANCZOS)
487
  #caption.append(captioning(sd_image_d))
488
- prompt4, cap=captioning(sd_image_d)
489
  capt.append(cap)
490
  else:
491
  sd_image_d = None
@@ -494,7 +496,7 @@ def generate_30(
494
  #sd_image_e.resize((height,width), Image.LANCZOS)
495
  sd_image_e.resize((768,768), Image.LANCZOS)
496
  #caption.append(captioning(sd_image_e))
497
- prompt5, cap=captioning(sd_image_e)
498
  capt.append(cap)
499
  else:
500
  sd_image_e = None
@@ -593,7 +595,8 @@ def generate_60(
593
  latent_file_3_scale: float = 1.0,
594
  latent_file_4_scale: float = 1.0,
595
  latent_file_5_scale: float = 1.0,
596
- samples=1,
 
597
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
598
  ):
599
  prompt1=None
@@ -611,14 +614,14 @@ def generate_60(
611
  #sd_image_a.resize((height,width), Image.LANCZOS)
612
  caption=[]
613
  #caption.append(captioning(sd_image_a))
614
- prompt1, cap=captioning(sd_image_a)
615
  capt.append(cap)
616
  if latent_file_2 is not None: # Check if a latent file is provided
617
  sd_image_b = Image.open(latent_file_2.name).convert('RGB')
618
  #sd_image_b.resize((height,width), Image.LANCZOS)
619
  sd_image_b.resize((768,768), Image.LANCZOS)
620
  #caption.append(captioning(sd_image_b))
621
- prompt2, cap=captioning(sd_image_b)
622
  capt.append(cap)
623
  else:
624
  sd_image_b = None
@@ -627,7 +630,7 @@ def generate_60(
627
  #sd_image_c.resize((height,width), Image.LANCZOS)
628
  sd_image_c.resize((768,768), Image.LANCZOS)
629
  #caption.append(captioning(sd_image_c))
630
- prompt3, cap=captioning(sd_image_c)
631
  capt.append(cap)
632
  else:
633
  sd_image_c = None
@@ -636,7 +639,7 @@ def generate_60(
636
  #sd_image_d.resize((height,width), Image.LANCZOS)
637
  sd_image_d.resize((768,768), Image.LANCZOS)
638
  #caption.append(captioning(sd_image_d))
639
- prompt4, cap=captioning(sd_image_d)
640
  capt.append(cap)
641
  else:
642
  sd_image_d = None
@@ -645,7 +648,7 @@ def generate_60(
645
  #sd_image_e.resize((height,width), Image.LANCZOS)
646
  sd_image_e.resize((768,768), Image.LANCZOS)
647
  #caption.append(captioning(sd_image_e))
648
- prompt5, cap=captioning(sd_image_e)
649
  capt.append(cap)
650
  else:
651
  sd_image_e = None
@@ -744,7 +747,8 @@ def generate_90(
744
  latent_file_3_scale: float = 1.0,
745
  latent_file_4_scale: float = 1.0,
746
  latent_file_5_scale: float = 1.0,
747
- samples=1,
 
748
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
749
  ):
750
  prompt1=None
@@ -762,14 +766,14 @@ def generate_90(
762
  #sd_image_a.resize((height,width), Image.LANCZOS)
763
  caption=[]
764
  #caption.append(captioning(sd_image_a))
765
- prompt1, cap=captioning(sd_image_a)
766
  capt.append(cap)
767
  if latent_file_2 is not None: # Check if a latent file is provided
768
  sd_image_b = Image.open(latent_file_2.name).convert('RGB')
769
  #sd_image_b.resize((height,width), Image.LANCZOS)
770
  sd_image_b.resize((768,768), Image.LANCZOS)
771
  #caption.append(captioning(sd_image_b))
772
- prompt2, cap=captioning(sd_image_b)
773
  capt.append(cap)
774
  else:
775
  sd_image_b = None
@@ -778,7 +782,7 @@ def generate_90(
778
  #sd_image_c.resize((height,width), Image.LANCZOS)
779
  sd_image_c.resize((768,768), Image.LANCZOS)
780
  #caption.append(captioning(sd_image_c))
781
- prompt3, cap=captioning(sd_image_c)
782
  capt.append(cap)
783
  else:
784
  sd_image_c = None
@@ -787,7 +791,7 @@ def generate_90(
787
  #sd_image_d.resize((height,width), Image.LANCZOS)
788
  sd_image_d.resize((768,768), Image.LANCZOS)
789
  #caption.append(captioning(sd_image_d))
790
- prompt4, cap=captioning(sd_image_d)
791
  capt.append(cap)
792
  else:
793
  sd_image_d = None
@@ -796,7 +800,7 @@ def generate_90(
796
  #sd_image_e.resize((height,width), Image.LANCZOS)
797
  sd_image_e.resize((768,768), Image.LANCZOS)
798
  #caption.append(captioning(sd_image_e))
799
- prompt5, cap=captioning(sd_image_e)
800
  capt.append(cap)
801
  else:
802
  sd_image_e = None
@@ -990,6 +994,9 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
990
  value="('deformed', 'distorted', 'disfigured':1.3),'not photorealistic':1.5, 'poorly drawn', 'bad anatomy', 'wrong anatomy', 'extra limb', 'missing limb', 'floating limbs', 'poorly drawn hands', 'poorly drawn feet', 'poorly drawn face':1.3, 'out of frame', 'extra limbs', 'bad anatomy', 'bad art', 'beginner', 'distorted face','amateur'",
991
  visible=True,
992
  )
 
 
 
993
  samples = gr.Slider(
994
  label="Samples",
995
  minimum=0,
@@ -1070,6 +1077,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
1070
  file_4_strength,
1071
  file_5_strength,
1072
  samples,
 
1073
  ],
1074
  outputs=[result],
1075
  )
@@ -1102,6 +1110,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
1102
  file_4_strength,
1103
  file_5_strength,
1104
  samples,
 
1105
  ],
1106
  outputs=[result],
1107
  )
@@ -1134,6 +1143,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
1134
  file_4_strength,
1135
  file_5_strength,
1136
  samples,
 
1137
  ],
1138
  outputs=[result],
1139
  )
 
243
  f.write(f"Model UNET: ford442/RealVisXL_V5.0_BF16 \n")
244
  upload_to_ftp(filename)
245
 
246
+ def captioning(img, prompts_array):
247
+ '''prompts_array = [
248
  "Adjectives describing this scene are:",
249
  # "The color scheme of this image is",
250
  # "This scene could be described in detail as",
 
255
  "The setting of this scene must be located",
256
  # Add more prompts here
257
  ]
258
+ '''
259
  output_prompt=[]
260
  cap_prompt = (
261
  "Describe this image with a caption to be used for image generation."
 
443
  latent_file_3_scale: float = 1.0,
444
  latent_file_4_scale: float = 1.0,
445
  latent_file_5_scale: float = 1.0,
446
+ samples: int = 1,
447
+ prompt_array: list[str] = None,
448
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
449
  ):
450
  prompt1=None
 
462
  #sd_image_a.resize((height,width), Image.LANCZOS)
463
  caption=[]
464
  #caption.append(captioning(sd_image_a))
465
+ prompt1, cap=captioning(sd_image_a,prompt_array)
466
  capt.append(cap)
467
  if latent_file_2 is not None: # Check if a latent file is provided
468
  sd_image_b = Image.open(latent_file_2.name).convert('RGB')
469
  #sd_image_b.resize((height,width), Image.LANCZOS)
470
  sd_image_b.resize((768,768), Image.LANCZOS)
471
  #caption.append(captioning(sd_image_b))
472
+ prompt2, cap=captioning(sd_image_b,prompt_array)
473
  capt.append(cap)
474
  else:
475
  sd_image_b = None
 
478
  #sd_image_c.resize((height,width), Image.LANCZOS)
479
  sd_image_c.resize((768,768), Image.LANCZOS)
480
  #caption.append(captioning(sd_image_c))
481
+ prompt3, cap=captioning(sd_image_c,prompt_array)
482
  capt.append(cap)
483
  else:
484
  sd_image_c = None
 
487
  #sd_image_d.resize((height,width), Image.LANCZOS)
488
  sd_image_d.resize((768,768), Image.LANCZOS)
489
  #caption.append(captioning(sd_image_d))
490
+ prompt4, cap=captioning(sd_image_d,prompt_array)
491
  capt.append(cap)
492
  else:
493
  sd_image_d = None
 
496
  #sd_image_e.resize((height,width), Image.LANCZOS)
497
  sd_image_e.resize((768,768), Image.LANCZOS)
498
  #caption.append(captioning(sd_image_e))
499
+ prompt5, cap=captioning(sd_image_e,prompt_array)
500
  capt.append(cap)
501
  else:
502
  sd_image_e = None
 
595
  latent_file_3_scale: float = 1.0,
596
  latent_file_4_scale: float = 1.0,
597
  latent_file_5_scale: float = 1.0,
598
+ samples: int = 1,
599
+ prompt_array: list[str] = None,
600
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
601
  ):
602
  prompt1=None
 
614
  #sd_image_a.resize((height,width), Image.LANCZOS)
615
  caption=[]
616
  #caption.append(captioning(sd_image_a))
617
+ prompt1, cap=captioning(sd_image_a,prompt_array)
618
  capt.append(cap)
619
  if latent_file_2 is not None: # Check if a latent file is provided
620
  sd_image_b = Image.open(latent_file_2.name).convert('RGB')
621
  #sd_image_b.resize((height,width), Image.LANCZOS)
622
  sd_image_b.resize((768,768), Image.LANCZOS)
623
  #caption.append(captioning(sd_image_b))
624
+ prompt2, cap=captioning(sd_image_b,prompt_array)
625
  capt.append(cap)
626
  else:
627
  sd_image_b = None
 
630
  #sd_image_c.resize((height,width), Image.LANCZOS)
631
  sd_image_c.resize((768,768), Image.LANCZOS)
632
  #caption.append(captioning(sd_image_c))
633
+ prompt3, cap=captioning(sd_image_c,prompt_array)
634
  capt.append(cap)
635
  else:
636
  sd_image_c = None
 
639
  #sd_image_d.resize((height,width), Image.LANCZOS)
640
  sd_image_d.resize((768,768), Image.LANCZOS)
641
  #caption.append(captioning(sd_image_d))
642
+ prompt4, cap=captioning(sd_image_d,prompt_array)
643
  capt.append(cap)
644
  else:
645
  sd_image_d = None
 
648
  #sd_image_e.resize((height,width), Image.LANCZOS)
649
  sd_image_e.resize((768,768), Image.LANCZOS)
650
  #caption.append(captioning(sd_image_e))
651
+ prompt5, cap=captioning(sd_image_e,prompt_array)
652
  capt.append(cap)
653
  else:
654
  sd_image_e = None
 
747
  latent_file_3_scale: float = 1.0,
748
  latent_file_4_scale: float = 1.0,
749
  latent_file_5_scale: float = 1.0,
750
+ samples: int = 1,
751
+ prompt_array: list[str] = None,
752
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
753
  ):
754
  prompt1=None
 
766
  #sd_image_a.resize((height,width), Image.LANCZOS)
767
  caption=[]
768
  #caption.append(captioning(sd_image_a))
769
+ prompt1, cap=captioning(sd_image_a,prompt_array)
770
  capt.append(cap)
771
  if latent_file_2 is not None: # Check if a latent file is provided
772
  sd_image_b = Image.open(latent_file_2.name).convert('RGB')
773
  #sd_image_b.resize((height,width), Image.LANCZOS)
774
  sd_image_b.resize((768,768), Image.LANCZOS)
775
  #caption.append(captioning(sd_image_b))
776
+ prompt2, cap=captioning(sd_image_b,prompt_array)
777
  capt.append(cap)
778
  else:
779
  sd_image_b = None
 
782
  #sd_image_c.resize((height,width), Image.LANCZOS)
783
  sd_image_c.resize((768,768), Image.LANCZOS)
784
  #caption.append(captioning(sd_image_c))
785
+ prompt3, cap=captioning(sd_image_c,prompt_array)
786
  capt.append(cap)
787
  else:
788
  sd_image_c = None
 
791
  #sd_image_d.resize((height,width), Image.LANCZOS)
792
  sd_image_d.resize((768,768), Image.LANCZOS)
793
  #caption.append(captioning(sd_image_d))
794
+ prompt4, cap=captioning(sd_image_d,prompt_array)
795
  capt.append(cap)
796
  else:
797
  sd_image_d = None
 
800
  #sd_image_e.resize((height,width), Image.LANCZOS)
801
  sd_image_e.resize((768,768), Image.LANCZOS)
802
  #caption.append(captioning(sd_image_e))
803
+ prompt5, cap=captioning(sd_image_e,prompt_array)
804
  capt.append(cap)
805
  else:
806
  sd_image_e = None
 
994
  value="('deformed', 'distorted', 'disfigured':1.3),'not photorealistic':1.5, 'poorly drawn', 'bad anatomy', 'wrong anatomy', 'extra limb', 'missing limb', 'floating limbs', 'poorly drawn hands', 'poorly drawn feet', 'poorly drawn face':1.3, 'out of frame', 'extra limbs', 'bad anatomy', 'bad art', 'beginner', 'distorted face','amateur'",
995
  visible=True,
996
  )
997
+ prompts_array = []
998
+ for i in range(5): # Create 5 text fields for prompts
999
+ prompts_array.append(gr.Textbox(label=f"Prompt {i+1}", lines=1))
1000
  samples = gr.Slider(
1001
  label="Samples",
1002
  minimum=0,
 
1077
  file_4_strength,
1078
  file_5_strength,
1079
  samples,
1080
+ prompts_array, # Pass prompts_array to the generate function
1081
  ],
1082
  outputs=[result],
1083
  )
 
1110
  file_4_strength,
1111
  file_5_strength,
1112
  samples,
1113
+ prompts_array, # Pass prompts_array to the generate function
1114
  ],
1115
  outputs=[result],
1116
  )
 
1143
  file_4_strength,
1144
  file_5_strength,
1145
  samples,
1146
+ prompts_array, # Pass prompts_array to the generate function
1147
  ],
1148
  outputs=[result],
1149
  )