from hidiffusion import apply_hidiffusion, remove_hidiffusion
from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
import gradio as gr
import torch
import spaces
# Load a fine-tuned VAE (the base checkpoint ships without one, hence "noVAE").
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)

pretrain_model = "SG161222/Realistic_Vision_V5.1_noVAE"
scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")

# Optional safety components; pass these as keyword arguments to from_pretrained
# to enable content filtering on the generated images.
# safety_checker=StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker"),
# feature_extractor=CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32"),

pipe = StableDiffusionPipeline.from_pretrained(
    pretrain_model, scheduler=scheduler, vae=vae, torch_dtype=torch.float16
).to("cuda")
# Optional memory/speed tweaks: enable_xformers_memory_efficient_attention can reduce
# memory usage and increase inference speed; enable_model_cpu_offload and
# enable_vae_tiling can reduce memory usage.
#pipe.enable_model_cpu_offload()
#pipe.enable_vae_tiling()
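# A sketch of the xformers option mentioned above (assumes the xformers package
# is installed; without it, this call raises an error):
#pipe.enable_xformers_memory_efficient_attention()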
# Apply hidiffusion with a single line of code.
apply_hidiffusion(pipe)
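# remove_hidiffusion(pipe), imported above, can be called later to restore the
# pipeline's original behavior.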
@spaces.GPU
def run_hidiffusion(prompt, negative_prompt, progress=gr.Progress(track_tqdm=True)):
    # With HiDiffusion applied, 1024x1024 generation works well above the 512x512
    # native resolution of SD 1.5-based models. eta only affects the DDIM scheduler.
    return pipe(prompt, guidance_scale=7.5, height=1024, width=1024, eta=1.0,
                negative_prompt=negative_prompt, num_inference_steps=25).images[0]
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative prompt")
    btn = gr.Button("Run")
    output = gr.Image(label="Result")
    btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])

demo.launch()