1inkusFace committed on
Commit
78ad6cc
·
verified ·
1 Parent(s): 536bfff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -7
app.py CHANGED
@@ -106,7 +106,6 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
106
  negative = ""
107
  return p.replace("{prompt}", positive), n + negative
108
 
109
-
110
  def load_and_prepare_model():
111
  unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='unet', low_cpu_mem_usage=False, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
112
  vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False, low_cpu_mem_usage=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
@@ -121,7 +120,7 @@ def load_and_prepare_model():
121
  text_encoder=None,
122
  text_encoder_2=None,
123
  vae=None,
124
- unet=unetX,
125
  )
126
 
127
  '''
@@ -138,8 +137,9 @@ def load_and_prepare_model():
138
  `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
139
 
140
  '''
141
- pipe.vae=vaeX
142
  pipe.to(device=device, dtype=torch.bfloat16)
 
143
  #pipe.vae.to(device=device, dtype=torch.bfloat16)
144
  #pipe.vae.do_resize=False
145
  #pipe.vae.do_rescale=False
@@ -174,7 +174,7 @@ txt_tokenizer.tokenizer_legacy=False
174
  model = Phi3ForCausalLM.from_pretrained(checkpoint).to('cuda:0')
175
  #model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map='cuda') #.to('cuda')
176
 
177
- ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
178
  text_encoder_1=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True) #.to(device=device, dtype=torch.bfloat16)
179
  text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True) #.to(device=device, dtype=torch.bfloat16)
180
 
@@ -448,7 +448,8 @@ def generate_30(
448
  global text_encoder_2
449
  pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
450
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
451
- #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
 
452
  print('-- generating image --')
453
  sd_image = ip_model.generate(
454
  pil_image_1=sd_image_a,
@@ -599,7 +600,8 @@ def generate_60(
599
  global text_encoder_2
600
  pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
601
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
602
- #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
 
603
  print('-- generating image --')
604
  sd_image = ip_model.generate(
605
  pil_image_1=sd_image_a,
@@ -750,7 +752,8 @@ def generate_90(
750
  global text_encoder_2
751
  pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
752
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
753
- #pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
 
754
  print('-- generating image --')
755
  sd_image = ip_model.generate(
756
  pil_image_1=sd_image_a,
 
106
  negative = ""
107
  return p.replace("{prompt}", positive), n + negative
108
 
 
109
  def load_and_prepare_model():
110
  unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='unet', low_cpu_mem_usage=False, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
111
  vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False, low_cpu_mem_usage=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
 
120
  text_encoder=None,
121
  text_encoder_2=None,
122
  vae=None,
123
+ unet=None,
124
  )
125
 
126
  '''
 
137
  `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
138
 
139
  '''
140
+ #pipe.vae=vaeX
141
  pipe.to(device=device, dtype=torch.bfloat16)
142
+ pipe.vae=vaeX
143
  #pipe.vae.to(device=device, dtype=torch.bfloat16)
144
  #pipe.vae.do_resize=False
145
  #pipe.vae.do_rescale=False
 
174
  model = Phi3ForCausalLM.from_pretrained(checkpoint).to('cuda:0')
175
  #model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map='cuda') #.to('cuda')
176
 
177
+ #ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
178
  text_encoder_1=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True) #.to(device=device, dtype=torch.bfloat16)
179
  text_encoder_2=CLIPTextModelWithProjection.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder_2',token=True) #.to(device=device, dtype=torch.bfloat16)
180
 
 
448
  global text_encoder_2
449
  pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
450
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
451
+ pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
452
+ ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
453
  print('-- generating image --')
454
  sd_image = ip_model.generate(
455
  pil_image_1=sd_image_a,
 
600
  global text_encoder_2
601
  pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
602
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
603
+ pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
604
+ ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
605
  print('-- generating image --')
606
  sd_image = ip_model.generate(
607
  pil_image_1=sd_image_a,
 
752
  global text_encoder_2
753
  pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
754
  pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
755
+ pipe.unet=unetX.to(device=device, dtype=torch.bfloat16)
756
+ ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
757
  print('-- generating image --')
758
  sd_image = ip_model.generate(
759
  pil_image_1=sd_image_a,