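# Gradio text-to-image demo: Disty0/LCM_SoteMix running on CPU through
# OpenVINO (optimum-intel), with the TAESD tiny VAE decoder swapped in
# for faster latent decoding.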
import gradio as gr
from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel, OVStableDiffusionPipeline
from huggingface_hub import snapshot_download
import openvino.runtime as ov
from typing import Optional, Dict
model_id = "Disty0/LCM_SoteMix"

# Static output resolution; the pipeline is reshaped to these dimensions below.
HEIGHT = 768
WIDTH = 768
batch_size = -1  # -1 keeps the batch dimension dynamic when reshaping
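# Calling the grandparent's __init__ via super(OVModelVaeDecoder, self) skips
# OVModelVaeDecoder's own initializer, so a standalone TAESD decoder can be
# attached to the pipeline without the usual config/model-directory lookup.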
class CustomOVModelVaeDecoder(OVModelVaeDecoder):
    def __init__(
        self,
        model: ov.Model,
        parent_model: OVBaseModel,
        ov_config: Optional[Dict[str, str]] = None,
        model_dir: Optional[str] = None,
    ):
        super(OVModelVaeDecoder, self).__init__(model, parent_model, ov_config, "vae_decoder", model_dir)
# Load without compiling so the VAE decoder can be swapped and the shapes
# fixed first; an empty CACHE_DIR disables OpenVINO's model cache.
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, compile=False, ov_config={"CACHE_DIR": ""})

# Swap in the TAESD tiny VAE decoder for much faster latent decoding.
taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
pipe.vae_decoder = CustomOVModelVaeDecoder(
    model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
    parent_model=pipe,
    model_dir=taesd_dir,
)

# Reshape to static dimensions, then compile for the target device.
pipe.reshape(batch_size=batch_size, height=HEIGHT, width=WIDTH, num_images_per_prompt=1)
pipe.compile()
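# A quick smoke test of the compiled pipeline could look like this
# (hypothetical snippet, not part of the app flow):
#   image = pipe("A cute kitten", guidance_scale=1.0, num_inference_steps=8).images[0]
#   image.save("kitten.png")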
def infer(prompt, negative_prompt="", step=8):
    # Defaults let the UI pass only the prompt. LCM checkpoints converge in a
    # few steps, and guidance_scale=1.0 effectively disables classifier-free
    # guidance, the usual LCM setting.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=WIDTH,
        height=HEIGHT,
        guidance_scale=1.0,
        num_inference_steps=step,
        num_images_per_prompt=1,
    ).images[0]
    return image
examples = [
    "A cute kitten, Japanese cartoon style.",
    "A sweet family, dad stands next to mom, mom holds baby girl.",
    "A delicious ceviche cheesecake slice",
]
css="""
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
power_device = "CPU"
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Disty0/LCM_SoteMix 768x768
        Currently running on {power_device}.
        """)
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Image(label="Result", show_label=False)

        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )

    run_button.click(
        fn=infer,
        inputs=[prompt],
        outputs=[result],
    )
demo.queue().launch()