|
|
|
import gradio as gr |
|
import torch, io, base64 |
|
from PIL import Image |
|
from diffusers import StableDiffusionImg2ImgPipeline |
|
from vtoonify_model import load_vtoonify |
|
|
|
|
|
# Stable Diffusion img2img pipeline fine-tuned for Studio Ghibli style,
# loaded in fp16 and moved to the GPU — CUDA must be available at import time.
pipe_ghibli = StableDiffusionImg2ImgPipeline.from_pretrained(
    "nitrosocke/Ghibli-Diffusion", torch_dtype=torch.float16
).to("cuda")

# VToonify cartoonization model via the project-local loader.
# NOTE(review): assumes load_vtoonify() returns a torch module exposing .to();
# confirm against vtoonify_model.
pipe_vtoonify = load_vtoonify().to("cuda")
|
|
|
|
|
def pil_to_b64(img: Image.Image) -> str:
    """Serialize *img* to a base64-encoded PNG string."""
    buffer = io.BytesIO()
    img.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue())
    return encoded.decode()
|
|
|
def b64_to_pil(b64: str) -> Image.Image:
    """Decode a base64 string into an RGB PIL image."""
    raw = base64.b64decode(b64)
    image = Image.open(io.BytesIO(raw))
    return image.convert("RGB")
|
|
|
|
|
def run_effect(input_b64: str, effect: str) -> dict:
    """Apply *effect* to a base64-encoded image.

    effect "ghibli" routes through the diffusion pipeline; anything else
    goes to VToonify. Returns {"output_b64": <base64 PNG>}.
    """
    source = b64_to_pil(input_b64)
    if effect != "ghibli":
        # NOTE(review): assumes the VToonify pipeline returns a PIL image —
        # pil_to_b64 below calls .save() on it; confirm against vtoonify_model.
        result = pipe_vtoonify(source)
    else:
        result = pipe_ghibli(
            prompt="ghibli style",
            image=source,
            strength=0.5,
            guidance_scale=7.5,
        ).images[0]
    return {"output_b64": pil_to_b64(result)}
|
|
|
def api_process(input_b64, effect):
    """API entry point: apply *effect* to a base64 image.

    Delegates to run_effect and returns its {"output_b64": ...} dict.

    BUGFIX: the original decorators crashed at import time —
    `gr.utils.decorators.thread_safe` does not exist in Gradio
    (AttributeError), and `spaces` was never imported (NameError).
    On Hugging Face Spaces with ZeroGPU, add `import spaces` at the top
    of the file and restore `@spaces.GPU` on this function to request
    GPU allocation per call.
    """
    return run_effect(input_b64, effect)
|
|
|
def gradio_process(img: Image.Image, effect: str) -> Image.Image:
    """UI wrapper: round-trip a PIL image through the base64 API."""
    encoded = pil_to_b64(img)
    response = run_effect(encoded, effect)
    return b64_to_pil(response["output_b64"])
|
|
|
# Gradio UI: one tab for interactive use, one for base64-in/base64-out API use.
with gr.Blocks() as demo:
    # BUGFIX: the original title ended in the mojibake "π¨" — restored as the
    # palette emoji it most plausibly was.
    gr.Markdown("# Ghibli & VToonify Effects 🎨")

    with gr.Tab("Web UI"):
        # Interactive tab: upload an image, pick an effect, view the result.
        inp = gr.Image(type="pil", label="Upload Image")
        eff = gr.Radio(["ghibli", "vtoonify"], label="Effect")
        btn = gr.Button("Apply Effect")
        out = gr.Image(label="Result")
        btn.click(gradio_process, [inp, eff], out)

    with gr.Tab("API (base64)"):
        # Programmatic tab: paste a base64 image, get a base64 result back.
        inp_b64 = gr.Textbox(lines=4, label="Input Image (base64)")
        eff2 = gr.Radio(["ghibli", "vtoonify"], label="Effect")
        btn2 = gr.Button("Run API")
        out_b64 = gr.Textbox(lines=4, label="Output Image (base64)")
        btn2.click(api_process, [inp_b64, eff2], out_b64)

# Launch at module level — the convention for Hugging Face Spaces apps.
demo.launch()
|
|