ford442 committed on
Commit
3e2af3a
·
verified ·
1 Parent(s): 60bdc34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -27
app.py CHANGED
@@ -257,15 +257,11 @@ def generate_30(
257
  num_inference_steps: int = 125,
258
  use_resolution_binning: bool = True,
259
  denoise: float = 0.3,
260
- vae_scale: int = 8,
261
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
262
  ):
263
  #torch.backends.cudnn.benchmark = False
264
- #torch.cuda.empty_cache()
265
- #gc.collect()
266
- #global models
267
- #pipe = models[model_choice]
268
- pipe.vae.vae_scale_factor=vae_scale
269
  seed = int(randomize_seed_fn())
270
  generator = torch.Generator(device='cuda').manual_seed(seed)
271
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
@@ -319,15 +315,11 @@ def generate_60(
319
  num_inference_steps: int = 250,
320
  use_resolution_binning: bool = True,
321
  denoise: float = 0.3,
322
- vae_scale: int = 8,
323
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
324
  ):
325
  #torch.backends.cudnn.benchmark = False
326
- #torch.cuda.empty_cache()
327
- #gc.collect()
328
- #global models
329
- #pipe = models[model_choice]
330
- pipe.vae.vae_scale_factor=vae_scale
331
  seed = int(randomize_seed_fn())
332
  generator = torch.Generator(device='cuda').manual_seed(seed)
333
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
@@ -381,15 +373,11 @@ def generate_90(
381
  num_inference_steps: int = 250,
382
  use_resolution_binning: bool = True,
383
  denoise: float = 0.3,
384
- vae_scale: int = 8,
385
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
386
  ):
387
  #torch.backends.cudnn.benchmark = False
388
- #torch.cuda.empty_cache()
389
- #gc.collect()
390
- #global models
391
- #pipe = models[model_choice]
392
- pipe.vae.vae_scale_factor=vae_scale
393
  seed = int(randomize_seed_fn())
394
  generator = torch.Generator(device='cuda').manual_seed(seed)
395
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
@@ -506,12 +494,12 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
506
  step=0.01,
507
  value=0.3,
508
  )
509
- vae_scale = gr.Slider(
510
- label="VAE Scale Multiplier",
511
- minimum=1,
512
- maximum=16,
513
- step=1,
514
- value=8,
515
  )
516
  with gr.Row():
517
  width = gr.Slider(
@@ -574,7 +562,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
574
  guidance_scale,
575
  num_inference_steps,
576
  denoise,
577
- vae_scale,
578
  ],
579
  outputs=[result],
580
  )
@@ -596,7 +584,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
596
  guidance_scale,
597
  num_inference_steps,
598
  denoise,
599
- vae_scale,
600
  ],
601
  outputs=[result],
602
  )
@@ -618,7 +606,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
618
  guidance_scale,
619
  num_inference_steps,
620
  denoise,
621
- vae_scale,
622
  ],
623
  outputs=[result],
624
  )
 
257
  num_inference_steps: int = 125,
258
  use_resolution_binning: bool = True,
259
  denoise: float = 0.3,
260
+ lora_scale: float = 0.5,
261
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
262
  ):
263
  #torch.backends.cudnn.benchmark = False
264
+ pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
 
 
 
 
265
  seed = int(randomize_seed_fn())
266
  generator = torch.Generator(device='cuda').manual_seed(seed)
267
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
315
  num_inference_steps: int = 250,
316
  use_resolution_binning: bool = True,
317
  denoise: float = 0.3,
318
+ lora_scale: float = 0.5,
319
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
320
  ):
321
  #torch.backends.cudnn.benchmark = False
322
+ pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
 
 
 
 
323
  seed = int(randomize_seed_fn())
324
  generator = torch.Generator(device='cuda').manual_seed(seed)
325
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
373
  num_inference_steps: int = 250,
374
  use_resolution_binning: bool = True,
375
  denoise: float = 0.3,
376
+ lora_scale: float = 0.5,
377
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
378
  ):
379
  #torch.backends.cudnn.benchmark = False
380
+ pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
 
 
 
 
381
  seed = int(randomize_seed_fn())
382
  generator = torch.Generator(device='cuda').manual_seed(seed)
383
  #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
494
  step=0.01,
495
  value=0.3,
496
  )
497
+ lora_scale = gr.Slider(
498
+ label="LORA Scale (Skin)",
499
+ minimum=0.0,
500
+ maximum=1.0,
501
+ step=0.01,
502
+ value=0.5,
503
  )
504
  with gr.Row():
505
  width = gr.Slider(
 
562
  guidance_scale,
563
  num_inference_steps,
564
  denoise,
565
+ lora_scale,
566
  ],
567
  outputs=[result],
568
  )
 
584
  guidance_scale,
585
  num_inference_steps,
586
  denoise,
587
+ lora_scale,
588
  ],
589
  outputs=[result],
590
  )
 
606
  guidance_scale,
607
  num_inference_steps,
608
  denoise,
609
+ lora_scale,
610
  ],
611
  outputs=[result],
612
  )