from hidiffusion import apply_hidiffusion, remove_hidiffusion
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
import gradio as gr
import torch
import spaces

# Load the fine-tuned VAE in fp16 for better decoded image quality.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

# Optional safety-checker components; uncomment and pass them to
# StableDiffusionPipeline.from_pretrained below to enable content filtering.
# safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker"),
# feature_extractor=CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")

pretrain_model = "SG161222/Realistic_Vision_V5.1_noVAE"
scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")

pipe = StableDiffusionPipeline.from_pretrained(
    pretrain_model,
    scheduler=scheduler,
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

# Optional. enable_xformers_memory_efficient_attention can reduce memory usage and
# increase inference speed. enable_model_cpu_offload and enable_vae_tiling can
# reduce memory usage.
# pipe.enable_model_cpu_offload()
# pipe.enable_vae_tiling()

# Apply HiDiffusion with a single line of code.
apply_hidiffusion(pipe)


@spaces.GPU
def run_hidiffusion(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
    # Generate a 1024x1024 image; HiDiffusion allows the model to generate above
    # its native training resolution.
    return pipe(
        prompt,
        negative_prompt=negative_prompt,
        guidance_scale=7.5,
        height=1024,
        width=1024,
        eta=1.0,
        num_inference_steps=25,
    ).images[0]


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative prompt")
    btn = gr.Button("Run")
    output = gr.Image()
    btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])

demo.launch()
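
# Note: remove_hidiffusion is imported above but not used in this demo. As a minimal
# sketch (assuming remove_hidiffusion simply reverts the patch that apply_hidiffusion
# installs on the pipeline), the pipeline could be restored to its vanilla behavior
# like this:
#
# remove_hidiffusion(pipe)
# baseline = pipe("a photo of a cat", height=512, width=512, num_inference_steps=25).images[0]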