ford442 committed on
Commit
c192ce5
·
verified ·
1 Parent(s): c686b86

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -7
app.py CHANGED
@@ -96,7 +96,7 @@ def load_and_prepare_model(model_id):
96
  #vae = AutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2',use_safetensors=False)
97
  #vae = AutoencoderKL.from_single_file('https://huggingface.co/ford442/sdxl-vae-bf16/mySLR/myslrVAE_v10.safetensors')
98
  #vaeX = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",use_safetensors=True)
99
- vaeX = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae', safety_checker=None, use_safetensors=False) # ,use_safetensors=True FAILS
100
  #vaeX = AutoencoderKL.from_pretrained('ford442/RealVisXL_V5.0_FP64',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
101
  #unetX = UNet2DConditionModel.from_pretrained('SG161222/RealVisXL_V5.0',subfolder='unet') # ,use_safetensors=True FAILS
102
  # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
@@ -249,8 +249,8 @@ def generate_30(
249
  gc.collect()
250
  global models
251
  pipe = models[model_choice]
252
- if juggernaut == True:
253
- pipe.vae=vaeX
254
  seed = int(randomize_seed_fn(seed, randomize_seed))
255
  generator = torch.Generator(device='cuda').manual_seed(seed)
256
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
@@ -311,8 +311,8 @@ def generate_60(
311
  gc.collect()
312
  global models
313
  pipe = models[model_choice]
314
- if juggernaut == True:
315
- pipe.vae=vaeX
316
  seed = int(randomize_seed_fn(seed, randomize_seed))
317
  generator = torch.Generator(device='cuda').manual_seed(seed)
318
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
@@ -373,8 +373,8 @@ def generate_90(
373
  gc.collect()
374
  global models
375
  pipe = models[model_choice]
376
- if juggernaut == True:
377
- pipe.vae=vaeX
378
  seed = int(randomize_seed_fn(seed, randomize_seed))
379
  generator = torch.Generator(device='cuda').manual_seed(seed)
380
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
96
  #vae = AutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2',use_safetensors=False)
97
  #vae = AutoencoderKL.from_single_file('https://huggingface.co/ford442/sdxl-vae-bf16/mySLR/myslrVAE_v10.safetensors')
98
  #vaeX = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",use_safetensors=True)
99
+ #vaeX = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae', safety_checker=None, use_safetensors=False) # ,use_safetensors=True FAILS
100
  #vaeX = AutoencoderKL.from_pretrained('ford442/RealVisXL_V5.0_FP64',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
101
  #unetX = UNet2DConditionModel.from_pretrained('SG161222/RealVisXL_V5.0',subfolder='unet') # ,use_safetensors=True FAILS
102
  # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
 
249
  gc.collect()
250
  global models
251
  pipe = models[model_choice]
252
+ #if juggernaut == True:
253
+ # pipe.vae=vaeX
254
  seed = int(randomize_seed_fn(seed, randomize_seed))
255
  generator = torch.Generator(device='cuda').manual_seed(seed)
256
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
311
  gc.collect()
312
  global models
313
  pipe = models[model_choice]
314
+ #if juggernaut == True:
315
+ # pipe.vae=vaeX
316
  seed = int(randomize_seed_fn(seed, randomize_seed))
317
  generator = torch.Generator(device='cuda').manual_seed(seed)
318
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
373
  gc.collect()
374
  global models
375
  pipe = models[model_choice]
376
+ #if juggernaut == True:
377
+ # pipe.vae=vaeX
378
  seed = int(randomize_seed_fn(seed, randomize_seed))
379
  generator = torch.Generator(device='cuda').manual_seed(seed)
380
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)