from hidiffusion import apply_hidiffusion, remove_hidiffusion
from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
import gradio as gr
import torch
import spaces
# fp16-safe SDXL VAE; the stock VAE can produce NaNs in half precision.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

pretrain_model = "stabilityai/stable-diffusion-xl-base-1.0"
scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")
pipe = DiffusionPipeline.from_pretrained(pretrain_model, vae=vae, scheduler=scheduler, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda")
# Optional memory savers for smaller GPUs (slower, but lower peak VRAM):
# pipe.enable_model_cpu_offload()
# pipe.enable_vae_tiling()

# Apply HiDiffusion with a single line of code.
apply_hidiffusion(pipe)
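# Call remove_hidiffusion(pipe) to restore the vanilla pipeline if
# standard-resolution generation is needed again.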
@spaces.GPU
def run_hidiffusion(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
    # With HiDiffusion applied, SDXL can generate at 2048x2048, beyond its native 1024x1024.
    return pipe(prompt, negative_prompt=negative_prompt, guidance_scale=7.5, height=2048, width=2048, eta=1.0, num_inference_steps=25).images[0]
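# Hypothetical direct invocation, bypassing the UI:
# run_hidiffusion("a cinematic photo of a lighthouse at dawn", "blurry, low quality").save("out.png")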
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative prompt")
    btn = gr.Button("Run")
    output = gr.Image()
    btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])
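# Optional: demo.queue() before launch() serializes GPU requests under concurrent traffic.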
demo.launch()