ford442 committed
Commit 2d8fa65 · verified · 1 Parent(s): d16b9a6
Update app.py

Files changed (1)
  1. app.py +11 -11
app.py CHANGED
@@ -131,7 +131,7 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
     negative = ""
     return p.replace("{prompt}", positive), n + negative
 
-def load_and_prepare_model(model_id):
+def load_and_prepare_model():
     #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
     vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None,use_safetensors=False)
     #vae = AutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2',use_safetensors=False)
@@ -231,11 +231,11 @@ def load_and_prepare_model(model_id):
     #pipe.to(device, torch.bfloat16)
     #del pipeX
 
-    pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/Fantasy_World_XL.safetensors", adapter_name="fantasy")
+    #pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/Fantasy_World_XL.safetensors", adapter_name="fantasy")
     pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/skin_texture_style_v4.safetensors", adapter_name="skin")
     pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/epicNewPhoto.safetensors", adapter_name="photo")
     #pipe.set_adapters(["skin", "photo", "fantasy"], adapter_weights=[0.5, 0.5, 0.5])
-    pipe.set_adapters(["skin", "photo", "fantasy"], adapter_weights=[1.0, 1.0, 1.0])
+    pipe.set_adapters(["skin", "photo"], adapter_weights=[1.0, 1.0])
 
     #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="dpmsolver++")
     #sched = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++")
@@ -243,8 +243,8 @@ def load_and_prepare_model(model_id):
     return pipe
 
 # Preload and compile both models
-models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}
-
+#models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}
+pipe = load_and_prepare_model()
 MAX_SEED = np.iinfo(np.int32).max
 
 neg_prompt_2 = " 'non-photorealistic':1.5, 'unrealistic skin','unattractive face':1.3, 'low quality':1.1, ('dull color scheme', 'dull colors', 'digital noise':1.2),'amateurish', 'poorly drawn face':1.3, 'poorly drawn', 'distorted face', 'low resolution', 'simplistic' "
@@ -311,8 +311,8 @@ def generate_30(
     #torch.backends.cudnn.benchmark = False
     #torch.cuda.empty_cache()
     #gc.collect()
-    global models
-    pipe = models[model_choice]
+    #global models
+    #pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
@@ -373,8 +373,8 @@ def generate_60(
     #torch.backends.cudnn.benchmark = True
     #torch.cuda.empty_cache()
     #gc.collect()
-    global models
-    pipe = models[model_choice]
+    #global models
+    #pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
@@ -435,8 +435,8 @@ def generate_90(
     #torch.backends.cudnn.benchmark = True
     #torch.cuda.empty_cache()
     #gc.collect()
-    global models
-    pipe = models[model_choice]
+    #global models
+    #pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
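
The net effect of the commit is that app.py stops building a per-model dictionary of pipelines and instead loads a single pipeline at import time, with the "fantasy" LoRA disabled and only the "skin" and "photo" adapters kept active; generate_30/60/90 then reuse that one module-level pipe instead of looking one up by model_choice. A minimal sketch of that pattern is below, using the diffusers multi-adapter API (load_lora_weights / set_adapters). The pipeline class, base checkpoint id, and device are assumptions for illustration, since the diff does not show them.

# Sketch of the single-pipeline + two-LoRA setup this commit moves to.
# StableDiffusionXLPipeline and the base checkpoint id are placeholders (assumptions);
# the LoRA repo, weight names, and adapter weights follow the diff.
import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL

def load_and_prepare_model():
    # VAE swap as in the diff context lines
    vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", use_safetensors=False)
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder base model (assumption)
        vae=vae,
        torch_dtype=torch.bfloat16,
    )
    # Load each LoRA under its own adapter name, then enable both at weight 1.0
    pipe.load_lora_weights("ford442/sdxl-vae-bf16",
                           weight_name="LoRA/skin_texture_style_v4.safetensors",
                           adapter_name="skin")
    pipe.load_lora_weights("ford442/sdxl-vae-bf16",
                           weight_name="LoRA/epicNewPhoto.safetensors",
                           adapter_name="photo")
    pipe.set_adapters(["skin", "photo"], adapter_weights=[1.0, 1.0])
    return pipe.to("cuda")

# One shared pipeline for generate_30 / generate_60 / generate_90,
# replacing the old models[model_choice] lookup.
pipe = load_and_prepare_model()

Loading one pipeline at import keeps only a single SDXL model resident in GPU memory, at the cost of ignoring the model_choice argument that the generate_* functions previously used to select from MODEL_OPTIONS.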