1inkusFace committed on
Commit d6f4feb · verified · 1 Parent(s): 02748d4

Update app.py

Files changed (1):
  1. app.py +13 -13
app.py CHANGED
@@ -110,11 +110,11 @@ model_repo='John6666/uber-realistic-porn-merge-xl-urpmxl-v6final-sdxl'
 
 rv='ford442/RealVisXL_V5.0_BF16'
 
-text_encoder = CLIPTextModel.from_pretrained(rv, low_cpu_mem_usage=False, subfolder='text_encoder', token=True)#.to(device=device, dtype=torch.bfloat16)
-text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(model_repo, low_cpu_mem_usage=False, subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
-tokenizer_1 = CLIPTokenizer.from_pretrained(rv, low_cpu_mem_usage=False, subfolder='tokenizer', token=True)
-tokenizer_2 = CLIPTokenizer.from_pretrained(model_repo, low_cpu_mem_usage=False, subfolder='tokenizer_2', token=True)
-scheduler = EulerAncestralDiscreteScheduler.from_pretrained(rv, low_cpu_mem_usage=False, subfolder='scheduler', token=True)
+text_encoder = CLIPTextModel.from_pretrained(rv, subfolder='text_encoder', token=True)#.to(device=device, dtype=torch.bfloat16)
+text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(model_repo, subfolder='text_encoder_2',token=True)#.to(device=device, dtype=torch.bfloat16)
+tokenizer_1 = CLIPTokenizer.from_pretrained(rv, subfolder='tokenizer', token=True)
+tokenizer_2 = CLIPTokenizer.from_pretrained(model_repo, subfolder='tokenizer_2', token=True)
+scheduler = EulerAncestralDiscreteScheduler.from_pretrained(rv, subfolder='scheduler', token=True)
 vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", low_cpu_mem_usage=False, safety_checker=None, use_safetensors=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
 unet = UNet2DConditionModel.from_pretrained(model_repo, low_cpu_mem_usage=False, subfolder='unet', upcast_attention=True, attention_type='gated-text-image', token=True)
 
@@ -130,17 +130,17 @@ def load_and_prepare_model():
         token=True,
         text_encoder=None,
         text_encoder_2=None,
-        tokenizer=None,
-        tokenizer_2=None,
-        scheduler=None,
-        unet=None,
+        #tokenizer=None,
+        #tokenizer_2=None,
+        #scheduler=None,
+        unet=unet,
         vae=None,
     )
 
-    pipe.scheduler=scheduler
-    pipe.tokenizer=tokenizer_1
-    pipe.tokenizer_2=tokenizer_2
-    pipe.unet=unet
+    #pipe.scheduler=scheduler
+    #pipe.tokenizer=tokenizer_1
+    #pipe.tokenizer_2=tokenizer_2
+    #pipe.unet=unet
     '''
     scaling_factor (`float`, *optional*, defaults to 0.18215):
         The component-wise standard deviation of the trained latent space computed using the first batch of the
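
For reference, the net effect of this commit is roughly the following loading pattern. This is a minimal sketch, not the full app.py: the pipeline class (StableDiffusionXLPipeline), the import block, and the return statement are assumptions not visible in these hunks, and the VAE and text encoders are left as None at construction time exactly as the diff shows them.

# Sketch only: reconstructs what the post-commit code does, under the
# assumptions named above; imports and the pipeline class are illustrative.
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLPipeline,  # assumed pipeline class, not shown in the diff
    UNet2DConditionModel,
)

model_repo = 'John6666/uber-realistic-porn-merge-xl-urpmxl-v6final-sdxl'
rv = 'ford442/RealVisXL_V5.0_BF16'

# The explicit low_cpu_mem_usage=False was dropped from these five calls,
# so they fall back to each library's default loading path.
text_encoder = CLIPTextModel.from_pretrained(rv, subfolder='text_encoder', token=True)
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(model_repo, subfolder='text_encoder_2', token=True)
tokenizer_1 = CLIPTokenizer.from_pretrained(rv, subfolder='tokenizer', token=True)
tokenizer_2 = CLIPTokenizer.from_pretrained(model_repo, subfolder='tokenizer_2', token=True)
scheduler = EulerAncestralDiscreteScheduler.from_pretrained(rv, subfolder='scheduler', token=True)

unet = UNet2DConditionModel.from_pretrained(
    model_repo, low_cpu_mem_usage=False, subfolder='unet',
    upcast_attention=True, attention_type='gated-text-image', token=True,
)

def load_and_prepare_model():
    # The UNet is now injected at construction time; tokenizer, tokenizer_2 and
    # scheduler are no longer passed as None, so the pipeline loads its own
    # copies from model_repo instead of taking the post-hoc reassignments.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_repo,
        token=True,
        text_encoder=None,    # presumably attached elsewhere in app.py (not shown)
        text_encoder_2=None,  # presumably attached elsewhere in app.py (not shown)
        unet=unet,
        vae=None,             # vaeXL is handled outside these hunks
    )
    return pipe

Design-wise, the change hands the custom UNet to the constructor directly instead of nulling it out and reassigning it afterwards, and it stops overriding the tokenizers and scheduler, so the pipeline now keeps the components bundled with model_repo rather than the separately loaded rv variants.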