from hidiffusion import apply_hidiffusion, remove_hidiffusion
from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
import gradio as gr
import torch
import spaces
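
# SDXL base pipeline: fp16-fix VAE + DDIM scheduler, loaded in fp16 on CUDA.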
model = "stabilityai/stable-diffusion-xl-base-1.0"
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
pipe = DiffusionPipeline.from_pretrained(model, vae=vae, scheduler=scheduler, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda")
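
# SD 1.5 pipeline: its own DDIM scheduler, reusing the same fp16-fix VAE instance as above.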
model_15 = "runwayml/stable-diffusion-v1-5"
scheduler_15 = DDIMScheduler.from_pretrained(model_15, subfolder="scheduler")
pipe_15 = DiffusionPipeline.from_pretrained(model_15, vae=vae, scheduler=scheduler_15, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda")
#pipe.enable_model_cpu_offload()
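# Decode the large (2048x2048) outputs tile-by-tile so the VAE stays within GPU memory.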
pipe.enable_vae_tiling()
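
# HiDiffusion is applied to the pipeline on every request; @spaces.GPU requests a GPU
# for the duration of the call (Hugging Face ZeroGPU).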
@spaces.GPU
def run_hidiffusion(prompt, negative_prompt="", progress=gr.Progress(track_tqdm=True)):
    apply_hidiffusion(pipe)
    return pipe(prompt, guidance_scale=7.5, height=2048, width=2048, eta=1.0, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
@spaces.GPU
def run_hidiffusion_15(prompt, negative_prompt="", progress=gr.Progress(track_tqdm=True)):
    apply_hidiffusion(pipe_15)
    return pipe_15(prompt, guidance_scale=7.5, height=1024, width=1024, eta=1.0, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
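
# Gradio UI: two tabs (SDXL at 2048x2048, SD 1.5 at 1024x1024) sharing a single output image.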
with gr.Blocks() as demo:
    gr.Markdown("# HiDiffusion Demo")

    with gr.Tab("SDXL in 2048x2048"):
        with gr.Row():
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, (NSFW:1.25)",  # set the default value here
                visible=False  # hide this field in the user interface
            )
        btn = gr.Button("Run")
with gr.Tab("SD1.5 in 1024x1024"):
with gr.Row():
prompt_15 = gr.Textbox(label="Prompt")
negative_prompt = gr.Textbox(
label="Negative Prompt",
value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, (NSFW:1.25)", # ์ด ๋ถ€๋ถ„์— ๊ธฐ๋ณธ๊ฐ’์„ ์„ค์ •
visible=False # ์‚ฌ์šฉ์ž ์ธํ„ฐํŽ˜์ด์Šค์—์„œ ์ด ํ•„๋“œ๋ฅผ ์ˆจ๊น€
)
btn_15 = gr.Button("Run")
    output = gr.Image(label="Result")

    gr.Examples(examples=[
        "a beautiful woman, full body visible, walking pose, red dress wear, direct frontal gaze, white color background, realistic photo, 16k",
        "a beautiful woman, full body visible, direct frontal gaze, white color background, realistic photo, 16k",
        "a beautiful woman, full body visible, model pose, direct frontal gaze, white color background, realistic photo, 16k"
    ], inputs=[prompt], outputs=[output], fn=run_hidiffusion)
    btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])
    btn_15.click(fn=run_hidiffusion_15, inputs=[prompt_15, negative_prompt_15], outputs=[output])

demo.launch()