|
from hidiffusion import apply_hidiffusion, remove_hidiffusion |
|
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL |
|
from transformers import CLIPFeatureExtractor |
|
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker |
|
import gradio as gr |
|
import torch |
|
import spaces |
|
|
|
# --- Model setup --------------------------------------------------------
# Checkpoint name suggests it ships without a VAE ("noVAE"), so a separate
# fine-tuned VAE is loaded and passed to the pipeline explicitly.
pretrain_model = "SG161222/Realistic_Vision_V5.1_noVAE"

vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse",
    torch_dtype=torch.float16,
)
scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")

pipe = StableDiffusionPipeline.from_pretrained(
    pretrain_model,
    scheduler=scheduler,
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

# Patch the pipeline in place with HiDiffusion so it can generate at
# resolutions above the model's native training size.
apply_hidiffusion(pipe)
|
|
|
@spaces.GPU
def run_hidiffusion(
    prompt,
    negative_prompt,
    progress=gr.Progress(track_tqdm=True),
    guidance_scale=7.5,
    num_inference_steps=25,
    height=1024,
    width=1024,
):
    """Generate a single image with the HiDiffusion-patched pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        negative_prompt: Text describing what to avoid in the image.
        progress: Gradio progress tracker; mirrors the pipeline's tqdm bar.
        guidance_scale: Classifier-free guidance strength (default 7.5).
        num_inference_steps: Number of denoising steps (default 25).
        height: Output image height in pixels (default 1024).
        width: Output image width in pixels (default 1024).

    Returns:
        The first generated PIL image.
    """
    # eta=1.0 enables full DDIM stochasticity for the configured scheduler.
    result = pipe(
        prompt,
        guidance_scale=guidance_scale,
        height=height,
        width=width,
        eta=1.0,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
    )
    return result.images[0]
|
|
|
# --- User interface -----------------------------------------------------
# NOTE: the module-level name `demo` is kept — Gradio/Spaces conventionally
# discover the app through it.
with gr.Blocks() as demo:
    # Prompt and negative-prompt text inputs.
    prompt_box = gr.Textbox()
    negative_box = gr.Textbox()
    run_button = gr.Button("Run")
    result_image = gr.Image()

    # Wire the button to the generation function.
    run_button.click(
        fn=run_hidiffusion,
        inputs=[prompt_box, negative_box],
        outputs=[result_image],
    )

demo.launch()