KingNish committed on
Commit
fd8667f
·
verified ·
1 Parent(s): 68848f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -6,7 +6,7 @@ import gradio as gr
6
  import numpy as np
7
  import torch
8
  from PIL import Image
9
- from diffusers import DiffusionPipeline, StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL
10
  from huggingface_hub import hf_hub_download, InferenceClient
11
 
12
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
@@ -18,7 +18,9 @@ pipe.to("cuda")
18
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
19
  refiner.to("cuda")
20
 
21
- pipe_fast = DiffusionPipeline.from_pretrained("SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
 
 
22
  pipe_fast.to("cuda")
23
 
24
  help_text = """
@@ -90,6 +92,7 @@ def king(type ,
90
  generator = torch.Generator().manual_seed(seed)
91
  if fast:
92
  pipes=pipe_fast
 
93
  else:
94
  pipes=pipe
95
  image = pipes( prompt = instruction,
 
6
  import numpy as np
7
  import torch
8
  from PIL import Image
9
+ from diffusers import DiffusionPipeline, StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
10
  from huggingface_hub import hf_hub_download, InferenceClient
11
 
12
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
18
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
19
  refiner.to("cuda")
20
 
21
+ pipe_fast = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0_Lightning", torch_dtype=torch.float16, vae=vae)
22
+ pipe_fast.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
23
+ pipe_fast.set_adapters("lora")
24
  pipe_fast.to("cuda")
25
 
26
  help_text = """
 
92
  generator = torch.Generator().manual_seed(seed)
93
  if fast:
94
  pipes=pipe_fast
95
+ steps=int(steps/4)
96
  else:
97
  pipes=pipe
98
  image = pipes( prompt = instruction,