|
from hidiffusion import apply_hidiffusion, remove_hidiffusion |
|
from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL |
|
import torch |
|
import spaces |
|
|
|
# --- Model setup (runs once at import time) ---------------------------------

# Fine-tuned VAE (sd-vae-ft-mse) swapped in for the noVAE base checkpoint.
# Load it in fp16 so its dtype matches the fp16 pipeline below; a component
# passed explicitly to from_pretrained keeps its own dtype, and an fp32 VAE
# under an fp16 UNet fails at decode time with a dtype mismatch.
vae = AutoencoderKL.from_pretrained(
    "stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16
)

# BUGFIX: the original line ended with a trailing comma, which silently bound
# `safety_checker` to a 1-tuple and would break the pipeline constructor.
safety_checker = StableDiffusionSafetyChecker.from_pretrained(
    "CompVis/stable-diffusion-safety-checker"
)
feature_extractor = CLIPFeatureExtractor.from_pretrained(
    "openai/clip-vit-base-patch32"
)

pretrain_model = "SG161222/Realistic_Vision_V5.1_noVAE"
scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")

pipe = DiffusionPipeline.from_pretrained(
    pretrain_model,
    scheduler=scheduler,
    safety_checker=safety_checker,
    feature_extractor=feature_extractor,
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

# Patch the pipeline in place so it can generate above the model's native
# training resolution (1024x1024 below) without the usual tiling artifacts.
apply_hidiffusion(pipe)
|
|
|
# BUGFIX: the handler must exist before it is wired up in btn.click() below.
# The original script defined run_hidiffusion AFTER the gr.Blocks() context,
# so the btn.click(...) call raised NameError at import time.
@spaces.GPU  # request a ZeroGPU slot on Hugging Face Spaces for each call
def run_hidiffusion(prompt, negative_prompt):
    """Run one 1024x1024 generation on the HiDiffusion-patched pipeline.

    Args:
        prompt: positive text prompt from the first Textbox.
        negative_prompt: negative text prompt from the second Textbox.

    Returns:
        The first PIL image produced by the pipeline, shown in the Image widget.
    """
    return pipe(
        prompt,
        guidance_scale=7.5,
        height=1024,
        width=1024,
        eta=1.0,  # eta=1.0 makes DDIM sampling fully stochastic
        negative_prompt=negative_prompt,
    ).images[0]


# Minimal UI: two text inputs, a trigger button, and an image output.
with gr.Blocks() as demo:
    prompt = gr.Textbox()
    negative_prompt = gr.Textbox()
    btn = gr.Button("Run")
    output = gr.Image()

    btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])

demo.launch()