1inkusFace committed on
Commit 4921f69 · verified · 1 Parent(s): fc033c2

Update app.py

Files changed (1)
  1. app.py +113 -21
app.py CHANGED
@@ -412,9 +412,7 @@ def generate_30(
     del processor5
     gc.collect()
     torch.cuda.empty_cache()
-    expand_prompt(prompt)
-    expand_prompt(caption)
-    expanded = expand_prompt(caption_2)
+    expanded = expand_prompt(prompt+caption+caption_2)
     expanded_1 = expanded[0]
     expanded_2 = expanded[1]
     global model
@@ -428,7 +426,6 @@ def generate_30(
     pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
     #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
-
     print('-- generating image --')
     sd_image = ip_model.generate(
         pil_image_1=sd_image_a,
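The rewritten call site treats expand_prompt as a single call that returns a pair of expanded prompts, one per SDXL text encoder. A minimal sketch of that assumed contract; the body below is a placeholder, not the app's actual implementation:

def expand_prompt(text):
    # Placeholder expansion only; the real implementation lives elsewhere in app.py.
    expanded_for_encoder_1 = text + ", highly detailed"
    expanded_for_encoder_2 = text + ", sharp focus"
    return expanded_for_encoder_1, expanded_for_encoder_2

expanded = expand_prompt("a red bicycle at sunset")
expanded_1 = expanded[0]   # passed on as prompt=prompt+' '+expanded_1 in the generate_* calls below
expanded_2 = expanded[1]   # passed on as prompt_2=expanded_2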
@@ -491,35 +488,82 @@ def generate_60(
     samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    pipe.text_encoder=text_encoder_1
-    pipe.text_encoder_2=text_encoder_2
+    global captioner_2
+    captioner2=captioner_2
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
-        sd_image_a = Image.open(latent_file.name)
+        sd_image_a = Image.open(latent_file.name).convert('RGB')
+        sd_image_a.resize((height,width), Image.LANCZOS)
+        caption=[]
+        caption_2=[]
+        #caption.append(captioner(sd_image_a))
+        caption.append(captioner2(sd_image_a))
+        #caption.append(captioner_3(sd_image_a))
+        caption_2.append(captioning(sd_image_a))
     if latent_file_2 is not None: # Check if a latent file is provided
-        sd_image_b = Image.open(latent_file_2.name)
+        sd_image_b = Image.open(latent_file_2.name).convert('RGB')
         sd_image_b.resize((height,width), Image.LANCZOS)
+        #caption.append(captioner(sd_image_b))
+        caption.append(captioner2(sd_image_b))
+        #caption.append(captioner_3(sd_image_b))
+        caption_2.append(captioning(sd_image_b))
     else:
         sd_image_b = None
     if latent_file_3 is not None: # Check if a latent file is provided
-        sd_image_c = Image.open(latent_file_3.name)
+        sd_image_c = Image.open(latent_file_3.name).convert('RGB')
         sd_image_c.resize((height,width), Image.LANCZOS)
+        #caption.append(captioner(sd_image_c))
+        caption.append(captioner2(sd_image_c))
+        #caption.append(captioner_3(sd_image_c))
+        caption_2.append(captioning(sd_image_c))
     else:
         sd_image_c = None
     if latent_file_4 is not None: # Check if a latent file is provided
-        sd_image_d = Image.open(latent_file_4.name)
+        sd_image_d = Image.open(latent_file_4.name).convert('RGB')
         sd_image_d.resize((height,width), Image.LANCZOS)
+        #caption.append(captioner(sd_image_d))
+        caption.append(captioner2(sd_image_d))
+        #caption.append(captioner_3(sd_image_d))
+        caption_2.append(captioning(sd_image_d))
     else:
         sd_image_d = None
     if latent_file_5 is not None: # Check if a latent file is provided
-        sd_image_e = Image.open(latent_file_5.name)
+        sd_image_e = Image.open(latent_file_5.name).convert('RGB')
         sd_image_e.resize((height,width), Image.LANCZOS)
+        #caption.append(captioner(sd_image_e))
+        caption.append(captioner2(sd_image_e))
+        #caption.append(captioner_3(sd_image_e))
+        caption_2.append(captioning(sd_image_e))
     else:
         sd_image_e = None
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     filename= f'rv_IP_{timestamp}.png'
     print("-- using image file --")
+    print(caption)
+    print(caption_2)
+    print("-- generating further caption --")
+    global model5
+    global processor5
+    del captioner2
+    del model5
+    del processor5
+    gc.collect()
+    torch.cuda.empty_cache()
+    expanded = expand_prompt(prompt+caption+caption_2)
+    expanded_1 = expanded[0]
+    expanded_2 = expanded[1]
+    global model
+    global txt_tokenizer
+    del model
+    del txt_tokenizer
+    gc.collect()
+    torch.cuda.empty_cache()
+    global text_encoder_1
+    global text_encoder_2
+    pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
+    #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
     print('-- generating image --')
     sd_image = ip_model.generate(
         pil_image_1=sd_image_a,
@@ -527,7 +571,8 @@ def generate_60(
         pil_image_3=sd_image_c,
         pil_image_4=sd_image_d,
         pil_image_5=sd_image_e,
-        prompt=prompt,
+        prompt=prompt+' '+expanded_1,
+        prompt_2=expanded_2,
         negative_prompt=negative_prompt,
         text_scale=text_scale,
         ip_scale=ip_scale,
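In the new generate_60 (and generate_90 below), caption and caption_2 are built as Python lists, and they are only initialized inside the first if latent_file branch. With a str prompt, expand_prompt(prompt+caption+caption_2) would therefore raise a TypeError (str and list cannot be concatenated). A minimal sketch of flattening the lists into one string first, assuming the captioner2()/captioning() outputs are plain strings, which may not match the actual pipeline output type:

prompt = "a portrait photo"                     # example user prompt
caption = ["a woman in a red coat"]             # captioner2() outputs, assumed to be strings
caption_2 = ["a city street at night"]          # captioning() outputs, assumed to be strings
combined = " ".join([prompt, *caption, *caption_2])
# combined can then be handed to expand_prompt(combined)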
@@ -581,44 +626,91 @@ def generate_90(
     samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    pipe.text_encoder=text_encoder_1
-    pipe.text_encoder_2=text_encoder_2
+    global captioner_2
+    captioner2=captioner_2
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
-        sd_image_a = Image.open(latent_file.name)
+        sd_image_a = Image.open(latent_file.name).convert('RGB')
+        sd_image_a.resize((height,width), Image.LANCZOS)
+        caption=[]
+        caption_2=[]
+        #caption.append(captioner(sd_image_a))
+        caption.append(captioner2(sd_image_a))
+        #caption.append(captioner_3(sd_image_a))
+        caption_2.append(captioning(sd_image_a))
     if latent_file_2 is not None: # Check if a latent file is provided
-        sd_image_b = Image.open(latent_file_2.name)
+        sd_image_b = Image.open(latent_file_2.name).convert('RGB')
         sd_image_b.resize((height,width), Image.LANCZOS)
+        #caption.append(captioner(sd_image_b))
+        caption.append(captioner2(sd_image_b))
+        #caption.append(captioner_3(sd_image_b))
+        caption_2.append(captioning(sd_image_b))
     else:
         sd_image_b = None
     if latent_file_3 is not None: # Check if a latent file is provided
-        sd_image_c = Image.open(latent_file_3.name)
+        sd_image_c = Image.open(latent_file_3.name).convert('RGB')
         sd_image_c.resize((height,width), Image.LANCZOS)
+        #caption.append(captioner(sd_image_c))
+        caption.append(captioner2(sd_image_c))
+        #caption.append(captioner_3(sd_image_c))
+        caption_2.append(captioning(sd_image_c))
     else:
         sd_image_c = None
     if latent_file_4 is not None: # Check if a latent file is provided
-        sd_image_d = Image.open(latent_file_4.name)
+        sd_image_d = Image.open(latent_file_4.name).convert('RGB')
         sd_image_d.resize((height,width), Image.LANCZOS)
+        #caption.append(captioner(sd_image_d))
+        caption.append(captioner2(sd_image_d))
+        #caption.append(captioner_3(sd_image_d))
+        caption_2.append(captioning(sd_image_d))
     else:
         sd_image_d = None
     if latent_file_5 is not None: # Check if a latent file is provided
-        sd_image_e = Image.open(latent_file_5.name)
+        sd_image_e = Image.open(latent_file_5.name).convert('RGB')
         sd_image_e.resize((height,width), Image.LANCZOS)
+        #caption.append(captioner(sd_image_e))
+        caption.append(captioner2(sd_image_e))
+        #caption.append(captioner_3(sd_image_e))
+        caption_2.append(captioning(sd_image_e))
     else:
         sd_image_e = None
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     filename= f'rv_IP_{timestamp}.png'
     print("-- using image file --")
+    print(caption)
+    print(caption_2)
+    print("-- generating further caption --")
+    global model5
+    global processor5
+    del captioner2
+    del model5
+    del processor5
+    gc.collect()
+    torch.cuda.empty_cache()
+    expanded = expand_prompt(prompt+caption+caption_2)
+    expanded_1 = expanded[0]
+    expanded_2 = expanded[1]
+    global model
+    global txt_tokenizer
+    del model
+    del txt_tokenizer
+    gc.collect()
+    torch.cuda.empty_cache()
+    global text_encoder_1
+    global text_encoder_2
+    pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
+    #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
     print('-- generating image --')
-    #with torch.no_grad():
     sd_image = ip_model.generate(
         pil_image_1=sd_image_a,
         pil_image_2=sd_image_b,
         pil_image_3=sd_image_c,
         pil_image_4=sd_image_d,
         pil_image_5=sd_image_e,
-        prompt=prompt,
+        prompt=prompt+' '+expanded_1,
+        prompt_2=expanded_2,
        negative_prompt=negative_prompt,
         text_scale=text_scale,
         ip_scale=ip_scale,
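Throughout these functions, PIL's Image.resize() returns a new image rather than resizing in place, and its size argument is (width, height), so calls like sd_image_b.resize((height,width), Image.LANCZOS) discard the resized copy. A minimal sketch of the rebinding form, with a hypothetical target size standing in for the app's height/width inputs:

from PIL import Image

width, height = 832, 1216                                        # hypothetical target size
sd_image_b = Image.new("RGB", (1024, 1024))                      # stand-in for the uploaded file
sd_image_b = sd_image_b.resize((width, height), Image.LANCZOS)   # keep the resized copy by rebinding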