ford442 committed
Commit 23ec91d · verified · 1 Parent(s): c0ef521

Update app.py
Files changed (1):
  app.py  +68 -48
app.py CHANGED
@@ -33,6 +33,7 @@ torch.backends.cudnn.benchmark = False
 
 torch.set_float32_matmul_precision("highest")
 os.putenv("HF_HUB_ENABLE_HF_TRANSFER","1")
+os.environ["SAFETENSORS_FAST_GPU"] = "1"
 
 FTP_HOST = "1ink.us"
 FTP_USER = "ford442"
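A note on this hunk: the new flag is set through os.environ, while the existing one still goes through os.putenv. A minimal sketch of the intent, assuming both flags are meant to be visible to the Python libraries that read them:

```python
import os

# Set loader flags before importing the libraries that read them. os.putenv()
# only touches the C-level environment and is not reflected in os.environ,
# so Python code that reads os.environ (e.g. huggingface_hub, typically at
# import time) will not see values set that way.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"   # hf_transfer-accelerated downloads (needs `pip install hf_transfer`)
os.environ["SAFETENSORS_FAST_GPU"] = "1"        # faster safetensors GPU loading path
```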
@@ -120,46 +121,23 @@ def load_and_prepare_model():
         text_encoder_2=None,
         vae=None,
     )
-
-    '''
-    scaling_factor (`float`, *optional*, defaults to 0.18215):
-        The component-wise standard deviation of the trained latent space computed using the first batch of the
-        training set. This is used to scale the latent space to have unit variance when training the diffusion
-        model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
-        diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
-        / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
-        Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
-    force_upcast (`bool`, *optional*, default to `True`):
-        If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
-        can be fine-tuned / trained to a lower range without loosing too much precision in which case
-        `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
-
-    '''
+
     pipe.vae=vaeX
     pipe.to(device=device, dtype=torch.bfloat16)
-    #pipe.vae.to(device=device, dtype=torch.bfloat16)
-    #pipe.vae.do_resize=False
-    #pipe.vae.do_rescale=False
-    #pipe.vae.do_convert_rgb=True
-    #pipe.vae.vae_scale_factor=8 #pipe.unet.set_default_attn_processor()
     pipe.vae.set_default_attn_processor()
     print(f'Pipeline: ')
-    #print(f'_optional_components: {pipe._optional_components}')
-    #print(f'watermark: {pipe.watermark}')
     print(f'image_processor: {pipe.image_processor}')
-    #print(f'feature_extractor: {pipe.feature_extractor}')
     print(f'init noise scale: {pipe.scheduler.init_noise_sigma}')
-    #print(f'UNET: {pipe.unet}')
     pipe.watermark=None
-    pipe.safety_checker=None
+    pipe.safety_checker=None
     return pipe
-
+
 # Preload and compile both models
 pipe = load_and_prepare_model()
 ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
 text_encoder=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True).to(device=device, dtype=torch.bfloat16)
 text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
-
+
 MAX_SEED = np.iinfo(np.int32).max
 
 neg_prompt_2 = " 'non-photorealistic':1.5, 'unrealistic skin','unattractive face':1.3, 'low quality':1.1, ('dull color scheme', 'dull colors', 'digital noise':1.2),'amateurish', 'poorly drawn face':1.3, 'poorly drawn', 'distorted face', 'low resolution', 'simplistic' "
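For context, the function this hunk trims swaps an externally loaded VAE into an SDXL pipeline and casts everything to bfloat16. A rough sketch of that pattern, assuming diffusers' StableDiffusionXLPipeline; the VAE checkpoint name is an assumption (the commit note only says "sdxl-vae"):

```python
import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL

device = "cuda"

# VAE loaded separately (checkpoint name assumed)
vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", use_safetensors=False)

pipe = StableDiffusionXLPipeline.from_pretrained(
    "ford442/RealVisXL_V5.0_BF16",
    text_encoder=None,    # text encoders are loaded separately later in app.py
    text_encoder_2=None,
    vae=None,             # the external VAE is attached below
)
pipe.vae = vaeX
pipe.to(device=device, dtype=torch.bfloat16)   # moves/casts the attached VAE as well
pipe.vae.set_default_attn_processor()          # reset to the default attention processor
pipe.watermark = None                          # disable the invisible watermark
pipe.safety_checker = None                     # kept as a plain attribute by this commit
```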
@@ -176,7 +154,7 @@ def upload_to_ftp(filename):
         print(f"Uploaded {filename} to FTP server")
     except Exception as e:
         print(f"FTP upload error: {e}")
-
+
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
     img.save(unique_name,optimize=False,compress_level=0)
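The body of upload_to_ftp is not shown in this diff; a plausible ftplib-based shape, consistent with the FTP_* constants and the log messages above (FTP_PASS is an assumed name):

```python
import os
from ftplib import FTP

def upload_to_ftp(filename):
    try:
        with FTP(FTP_HOST) as ftp:
            ftp.login(FTP_USER, FTP_PASS)  # FTP_PASS assumed to be defined next to FTP_HOST/FTP_USER
            with open(filename, "rb") as fh:
                ftp.storbinary(f"STOR {os.path.basename(filename)}", fh)
        print(f"Uploaded {filename} to FTP server")
    except Exception as e:
        print(f"FTP upload error: {e}")
```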
@@ -195,8 +173,14 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
         f.write(f"Model VAE: sdxl-vae to bfloat safetensor=false before cuda then attn_proc / scale factor 8 \n")
         f.write(f"Model UNET: ford442/RealVisXL_V5.0_BF16 \n")
-    upload_to_ftp(filename)
-
+    upload_to_ftp(filename)
+
+def display_image(file):
+    if file is not None:
+        return Image.open(file.name)
+    else:
+        return None
+
 @spaces.GPU(duration=40)
 def generate_30(
     prompt: str = "",
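The new display_image helper assumes the gr.File value exposes a .name path (Gradio's tempfile-style value). A slightly more defensive sketch that also accepts a plain path string, which some Gradio versions pass instead:

```python
from PIL import Image

def display_image(file):
    if file is None:
        return None
    path = file if isinstance(file, str) else file.name  # handle both value types
    return Image.open(path)
```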
@@ -282,8 +266,8 @@ def generate_30(
         downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
         downscale_path = f"rvIP_upscale_{timestamp}.png"
         downscale1.save(downscale_path,optimize=False,compress_level=0)
-        upload_to_ftp(downscale_path)
-        image_paths = [save_image(downscale1)]
+        upload_to_ftp(downscale_path)
+        image_paths = [save_image(downscale1)]
     else:
         print('-- IMAGE REQUIRED --')
     return image_paths
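The same two-line adjustment appears in generate_30, generate_60 and generate_90: the FTP upload and the gallery entry are produced only when an upscaled image exists. A sketch of that guarded tail (finalize is a hypothetical name; upload_to_ftp and save_image are the helpers defined earlier in app.py):

```python
from PIL import Image

def finalize(upscale, timestamp):
    # Quarter-size copy saved as an uncompressed PNG, uploaded via FTP, then
    # returned as the single gallery entry.
    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
    downscale_path = f"rvIP_upscale_{timestamp}.png"
    downscale1.save(downscale_path, optimize=False, compress_level=0)
    upload_to_ftp(downscale_path)
    return [save_image(downscale1)]
```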
@@ -372,8 +356,8 @@ def generate_60(
         downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
         downscale_path = f"rvIP_upscale_{timestamp}.png"
         downscale1.save(downscale_path,optimize=False,compress_level=0)
-        upload_to_ftp(downscale_path)
-        image_paths = [save_image(downscale1)]
+        upload_to_ftp(downscale_path)
+        image_paths = [save_image(downscale1)]
     else:
         print('-- IMAGE REQUIRED --')
     return image_paths
@@ -463,8 +447,8 @@ def generate_90(
         downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
         downscale_path = f"rvIP_upscale_{timestamp}.png"
         downscale1.save(downscale_path,optimize=False,compress_level=0)
-        upload_to_ftp(downscale_path)
-        image_paths = [save_image(downscale1)]
+        upload_to_ftp(downscale_path)
+        image_paths = [save_image(downscale1)]
     else:
         print('-- IMAGE REQUIRED --')
     return image_paths
@@ -517,7 +501,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
     run_button_30 = gr.Button("Run 30 Seconds", scale=0)
     run_button_60 = gr.Button("Run 60 Seconds", scale=0)
     run_button_90 = gr.Button("Run 90 Seconds", scale=0)
-    result = gr.Gallery(label="Result", columns=1, show_label=False)
+    result = gr.Gallery(label="Result", columns=1, show_label=False)
     ip_strength = gr.Slider(
         label="Image Strength",
         minimum=0.0,
@@ -526,7 +510,9 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         value=1.0,
     )
     with gr.Row():
-        latent_file = gr.File(label="Image Prompt (Required)")
+        with gr.Column():
+            latent_file = gr.File(label="Image Prompt (Required)", file_types=["image"])
+            latent_file_preview = gr.Image(label="Image Prompt Preview", interactive=False)
         file_1_strength = gr.Slider(
             label="Img 1 %",
             minimum=0.0,
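This and the four following hunks give each image prompt its own gr.Column holding the uploader and a read-only preview, while the strength sliders keep their previous positions. A minimal sketch of the layout for the first slot, using the same widget names as the diff:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            latent_file = gr.File(label="Image Prompt (Required)", file_types=["image"])
            latent_file_preview = gr.Image(label="Image Prompt Preview", interactive=False)
```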
@@ -534,7 +520,9 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             step=0.01,
             value=1.0,
         )
-        latent_file_2 = gr.File(label="Image Prompt 2 (Optional)")
+        with gr.Column():
+            latent_file_2 = gr.File(label="Image Prompt 2 (Optional)", file_types=["image"])
+            latent_file_2_preview = gr.Image(label="Image Prompt 2 Preview", interactive=False)
         file_2_strength = gr.Slider(
             label="Img 2 %",
             minimum=0.0,
@@ -542,7 +530,9 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             step=0.01,
             value=1.0,
         )
-        latent_file_3 = gr.File(label="Image Prompt 3 (Optional)")
+        with gr.Column():
+            latent_file_3 = gr.File(label="Image Prompt 3 (Optional)", file_types=["image"])
+            latent_file_3_preview = gr.Image(label="Image Prompt 3 Preview", interactive=False)
         file_3_strength = gr.Slider(
             label="Img 3 %",
             minimum=0.0,
@@ -550,7 +540,9 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             step=0.01,
             value=1.0,
         )
-        latent_file_4 = gr.File(label="Image Prompt 4 (Optional)")
+        with gr.Column():
+            latent_file_4 = gr.File(label="Image Prompt 4 (Optional)", file_types=["image"])
+            latent_file_4_preview = gr.Image(label="Image Prompt 4 Preview", interactive=False)
         file_4_strength = gr.Slider(
             label="Img 4 %",
             minimum=0.0,
@@ -558,7 +550,9 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             step=0.01,
             value=1.0,
         )
-        latent_file_5 = gr.File(label="Image Prompt 5 (Optional)")
+        with gr.Column():
+            latent_file_5 = gr.File(label="Image Prompt 5 (Optional)", file_types=["image"])
+            latent_file_5_preview = gr.Image(label="Image Prompt 5 Preview", interactive=False)
         file_5_strength = gr.Slider(
             label="Img 5 %",
             minimum=0.0,
@@ -636,7 +630,33 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         outputs=negative_prompt,
         api_name=False,
     )
-
+
+    latent_file.change(
+        display_image,
+        inputs=[latent_file],
+        outputs=[latent_file_preview]
+    )
+    latent_file_2.change(
+        display_image,
+        inputs=[latent_file_2],
+        outputs=[latent_file_2_preview]
+    )
+    latent_file_3.change(
+        display_image,
+        inputs=[latent_file_3],
+        outputs=[latent_file_3_preview]
+    )
+    latent_file_4.change(
+        display_image,
+        inputs=[latent_file_4],
+        outputs=[latent_file_4_preview]
+    )
+    latent_file_5.change(
+        display_image,
+        inputs=[latent_file_5],
+        outputs=[latent_file_5_preview]
+    )
+
     gr.on(
         triggers=[
             run_button_30.click,
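The five .change() hookups added above are identical in shape, so they could be wired in a loop. A sketch of that refactor (not part of the commit), using the component names defined earlier:

```python
# Each uploader re-runs display_image and pushes the result into its preview.
for uploader, preview in [
    (latent_file, latent_file_preview),
    (latent_file_2, latent_file_2_preview),
    (latent_file_3, latent_file_3_preview),
    (latent_file_4, latent_file_4_preview),
    (latent_file_5, latent_file_5_preview),
]:
    uploader.change(display_image, inputs=[uploader], outputs=[preview])
```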
@@ -668,7 +688,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         ],
         outputs=[result],
     )
-
+
     gr.on(
         triggers=[
             run_button_60.click,
@@ -700,7 +720,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         ],
         outputs=[result],
     )
-
+
     gr.on(
         triggers=[
             run_button_90.click,
@@ -749,7 +769,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
     gr.Markdown(
         """
         <div style="text-align: justify;">
-        ⚡This is the demo space for generating images using Stable Diffusion XL with quality styles, different models, and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images.
+        ⚡This is the demo space for generating images using Stable Diffusion XL with quality styles, different models, and types. Try the sample prompts to generate higher quality images. Try the sample prompts for generating higher quality images.
         <a href='https://huggingface.co/spaces/prithivMLmods/Top-Prompt-Collection' target='_blank'>Try prompts</a>.
         </div>
         """)
@@ -759,12 +779,12 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         <div style="text-align: justify;">
         ⚠️ Users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.
         </div>
-        """)
+        """)
 
 def text_generation(input_text, seed):
     full_prompt = "Text Generator Application by ecarbo"
     return full_prompt
-
+
 title = "Text Generator Demo GPT-Neo"
 description = "Text Generator Application by ecarbo"
 