|
import gradio as gr
import spaces
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from hidiffusion import apply_hidiffusion, remove_hidiffusion
from transformers import CLIPFeatureExtractor
|
|
|
# Repo id used for both the scheduler config and the pipeline weights.
# (Was an undefined name `pretrain_model` before — a NameError at import time.)
pretrain_model = "stabilityai/stable-diffusion-xl-base-1.0"

# fp16-safe SDXL VAE: the stock SDXL VAE is known to produce NaNs in float16.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)

# DDIM scheduler taken from the same repo so its config matches the UNet.
scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")

# Load the SDXL base pipeline in half precision and move it to the GPU.
pipe = DiffusionPipeline.from_pretrained(
    pretrain_model,
    vae=vae,
    scheduler=scheduler,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to("cuda")

# Patch the pipeline's UNet in place so it can generate above its native
# training resolution (needed for the 2048x2048 requests below).
apply_hidiffusion(pipe)
|
|
|
@spaces.GPU
def run_hidiffusion(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
    """Generate one 2048x2048 image for *prompt* on a ZeroGPU worker.

    *progress* is injected by Gradio and mirrors the pipeline's tqdm bar
    into the UI; it is not used directly here.
    """
    generation_kwargs = {
        "guidance_scale": 7.5,
        "height": 2048,
        "width": 2048,
        "eta": 1.0,  # eta=1.0 makes DDIM sampling fully stochastic
        "negative_prompt": negative_prompt,
        "num_inference_steps": 25,
    }
    result = pipe(prompt, **generation_kwargs)
    # The pipeline returns a batch; only the first image is displayed.
    return result.images[0]
|
|
|
# Minimal UI: two text inputs, a trigger button, and an image output.
with gr.Blocks() as demo:
    prompt_box = gr.Textbox()
    negative_prompt_box = gr.Textbox()
    run_button = gr.Button("Run")
    result_image = gr.Image()

    # Wire the button to the GPU-backed generation function.
    run_button.click(
        fn=run_hidiffusion,
        inputs=[prompt_box, negative_prompt_box],
        outputs=[result_image],
    )

demo.launch()