1inkusFace committed on
Commit
350939b
Β·
verified Β·
1 Parent(s): b57cec6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -361,21 +361,20 @@ def generate_30(
361
  latent_file_4_scale: float = 1.0,
362
  latent_file_5_scale: float = 1.0,
363
  samples=1,
364
- progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
365
-
366
  ):
367
  global captioner_2
368
  captioner2=captioner_2
369
  seed = random.randint(0, MAX_SEED)
370
  generator = torch.Generator(device='cuda').manual_seed(seed)
371
- if latent_file is not None: # Check if a latent file is provided
372
  sd_image_a = Image.open(latent_file.name).convert('RGB')
373
  sd_image_a.resize((224,224), Image.LANCZOS)
374
  #sd_image_a.resize((height,width), Image.LANCZOS)
375
  caption=[]
376
  caption_2=[]
377
  #caption.append(captioner(sd_image_a))
378
- #caption.append(captioner2(sd_image_a))
379
  #caption.append(captioner_3(sd_image_a))
380
  caption_2.append(captioning(sd_image_a))
381
  if latent_file_2 is not None: # Check if a latent file is provided
@@ -521,14 +520,14 @@ def generate_60(
521
  captioner2=captioner_2
522
  seed = random.randint(0, MAX_SEED)
523
  generator = torch.Generator(device='cuda').manual_seed(seed)
524
- if latent_file is not None: # Check if a latent file is provided
525
  sd_image_a = Image.open(latent_file.name).convert('RGB')
526
  sd_image_a.resize((224,224), Image.LANCZOS)
527
  #sd_image_a.resize((height,width), Image.LANCZOS)
528
  caption=[]
529
  caption_2=[]
530
  #caption.append(captioner(sd_image_a))
531
- #caption.append(captioner2(sd_image_a))
532
  #caption.append(captioner_3(sd_image_a))
533
  caption_2.append(captioning(sd_image_a))
534
  if latent_file_2 is not None: # Check if a latent file is provided
@@ -579,7 +578,7 @@ def generate_60(
579
  captions = flatten_and_stringify(captions)
580
  captions = " ".join(captions)
581
  print(captions)
582
- print("-- generating further caption --")
583
  global model5
584
  global processor5
585
  del captioner2
@@ -595,6 +594,7 @@ def generate_60(
595
  print(new_prompt)
596
  print("-- FINAL PROMPT --")
597
  print("-- ------------ --")
 
598
  #global model
599
  #global txt_tokenizer
600
  #del model
@@ -673,15 +673,14 @@ def generate_90(
673
  captioner2=captioner_2
674
  seed = random.randint(0, MAX_SEED)
675
  generator = torch.Generator(device='cuda').manual_seed(seed)
676
- if latent_file is not None: # Check if a latent file is provided
677
  sd_image_a = Image.open(latent_file.name).convert('RGB')
678
  sd_image_a.resize((224,224), Image.LANCZOS)
679
  #sd_image_a.resize((height,width), Image.LANCZOS)
680
  caption=[]
681
  caption_2=[]
682
  #caption.append(captioner(sd_image_a))
683
- cap=captioner2(sd_image_a)
684
- caption.append(cap)
685
  #caption.append(captioner_3(sd_image_a))
686
  caption_2.append(captioning(sd_image_a))
687
  if latent_file_2 is not None: # Check if a latent file is provided
@@ -732,7 +731,7 @@ def generate_90(
732
  captions = flatten_and_stringify(captions)
733
  captions = " ".join(captions)
734
  print(captions)
735
- print("-- generating further caption --")
736
  global model5
737
  global processor5
738
  del captioner2
@@ -748,6 +747,7 @@ def generate_90(
748
  print(new_prompt)
749
  print("-- FINAL PROMPT --")
750
  print("-- ------------ --")
 
751
  #global model
752
  #global txt_tokenizer
753
  #del model
 
361
  latent_file_4_scale: float = 1.0,
362
  latent_file_5_scale: float = 1.0,
363
  samples=1,
364
+ progress=gr.Progress(track_tqdm=True)
 
365
  ):
366
  global captioner_2
367
  captioner2=captioner_2
368
  seed = random.randint(0, MAX_SEED)
369
  generator = torch.Generator(device='cuda').manual_seed(seed)
370
+ if latent_file is not None:
371
  sd_image_a = Image.open(latent_file.name).convert('RGB')
372
  sd_image_a.resize((224,224), Image.LANCZOS)
373
  #sd_image_a.resize((height,width), Image.LANCZOS)
374
  caption=[]
375
  caption_2=[]
376
  #caption.append(captioner(sd_image_a))
377
+ caption.append(captioner2(sd_image_a))
378
  #caption.append(captioner_3(sd_image_a))
379
  caption_2.append(captioning(sd_image_a))
380
  if latent_file_2 is not None: # Check if a latent file is provided
 
520
  captioner2=captioner_2
521
  seed = random.randint(0, MAX_SEED)
522
  generator = torch.Generator(device='cuda').manual_seed(seed)
523
+ if latent_file is not None:
524
  sd_image_a = Image.open(latent_file.name).convert('RGB')
525
  sd_image_a.resize((224,224), Image.LANCZOS)
526
  #sd_image_a.resize((height,width), Image.LANCZOS)
527
  caption=[]
528
  caption_2=[]
529
  #caption.append(captioner(sd_image_a))
530
+ caption.append(captioner2(sd_image_a))
531
  #caption.append(captioner_3(sd_image_a))
532
  caption_2.append(captioning(sd_image_a))
533
  if latent_file_2 is not None: # Check if a latent file is provided
 
578
  captions = flatten_and_stringify(captions)
579
  captions = " ".join(captions)
580
  print(captions)
581
+ print("-- not generating further caption --")
582
  global model5
583
  global processor5
584
  del captioner2
 
594
  print(new_prompt)
595
  print("-- FINAL PROMPT --")
596
  print("-- ------------ --")
597
+
598
  #global model
599
  #global txt_tokenizer
600
  #del model
 
673
  captioner2=captioner_2
674
  seed = random.randint(0, MAX_SEED)
675
  generator = torch.Generator(device='cuda').manual_seed(seed)
676
+ if latent_file is not None:
677
  sd_image_a = Image.open(latent_file.name).convert('RGB')
678
  sd_image_a.resize((224,224), Image.LANCZOS)
679
  #sd_image_a.resize((height,width), Image.LANCZOS)
680
  caption=[]
681
  caption_2=[]
682
  #caption.append(captioner(sd_image_a))
683
+ caption.append(captioner2(sd_image_a))
 
684
  #caption.append(captioner_3(sd_image_a))
685
  caption_2.append(captioning(sd_image_a))
686
  if latent_file_2 is not None: # Check if a latent file is provided
 
731
  captions = flatten_and_stringify(captions)
732
  captions = " ".join(captions)
733
  print(captions)
734
+ print("-- not generating further caption --")
735
  global model5
736
  global processor5
737
  del captioner2
 
747
  print(new_prompt)
748
  print("-- FINAL PROMPT --")
749
  print("-- ------------ --")
750
+
751
  #global model
752
  #global txt_tokenizer
753
  #del model