from hidiffusion import apply_hidiffusion, remove_hidiffusion
from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
import gradio as gr
import torch
import spaces
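
# SDXL base pipeline with the fp16-fix VAE and a DDIM scheduler, kept in float16 on the GPU.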
model = "stabilityai/stable-diffusion-xl-base-1.0"
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
pipe = DiffusionPipeline.from_pretrained(model, vae=vae, scheduler=scheduler, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda")
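
# A second pipeline for Stable Diffusion 1.5, used by the 1024x1024 tab.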
model_15 = "runwayml/stable-diffusion-v1-5"
scheduler_15 = DDIMScheduler.from_pretrained(model_15, subfolder="scheduler")
pipe_15 = DiffusionPipeline.from_pretrained(model_15, scheduler=scheduler_15, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda")  # uses the model's own VAE; the fp16-fix VAE above is SDXL-specific
#pipe.enable_model_cpu_offload()
pipe.enable_vae_tiling()
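
# apply_hidiffusion patches the pipeline in place so it can generate above the model's
# native training resolution; remove_hidiffusion (imported above) would undo the patch.
# @spaces.GPU requests a GPU slot on Hugging Face Spaces (ZeroGPU) for each call.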
@spaces.GPU
def run_hidiffusion(prompt, negative_prompt="", progress=gr.Progress(track_tqdm=True)):
    apply_hidiffusion(pipe)
    return pipe(prompt, guidance_scale=7.5, height=2048, width=2048, eta=1.0, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
@spaces.GPU
def run_hidiffusion_15(prompt, negative_prompt="", progress=gr.Progress(track_tqdm=True)):
    apply_hidiffusion(pipe_15)
    return pipe_15(prompt, guidance_scale=7.5, height=1024, width=1024, eta=1.0, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
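
# Gradio UI: one tab per model, both writing to a shared output image.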
with gr.Blocks() as demo:
    gr.Markdown("# HiDiffusion Demo")
    with gr.Tab("SDXL in 2048x2048"):
        with gr.Row():
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, (NSFW:1.25)",  # default negative prompt
                visible=False  # hidden from the user interface
            )
            btn = gr.Button("Run")
    with gr.Tab("SD1.5 in 1024x1024"):
        with gr.Row():
            prompt_15 = gr.Textbox(label="Prompt")
            negative_prompt_15 = gr.Textbox(
                label="Negative Prompt",
                value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, (NSFW:1.25)",  # default negative prompt
                visible=False  # hidden from the user interface
            )
            btn_15 = gr.Button("Run")
    output = gr.Image(label="Result")
    gr.Examples(examples=[
        "a beautiful woman, full body visible, walking pose, wearing a red dress, direct frontal gaze, white color background, realistic photo, 16k",
        "a beautiful woman, full body visible, direct frontal gaze, white color background, realistic photo, 16k",
        "a beautiful woman, full body visible, model pose, direct frontal gaze, white color background, realistic photo, 16k"
    ], inputs=[prompt], outputs=[output], fn=run_hidiffusion)
    btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])
    btn_15.click(fn=run_hidiffusion_15, inputs=[prompt_15, negative_prompt_15], outputs=[output])

demo.launch()