import gradio as gr
from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel, OVStableDiffusionPipeline
from huggingface_hub import snapshot_download
import openvino.runtime as ov
from typing import Optional, Dict

model_id = "Disty0/LCM_SoteMix"
batch_size = -1  # -1 keeps the batch dimension dynamic when the model is reshaped
class CustomOVModelVaeDecoder(OVModelVaeDecoder):
    """VAE decoder that can be built from a standalone OpenVINO model file."""
    def __init__(
        self, model: ov.Model, parent_model: OVBaseModel, ov_config: Optional[Dict[str, str]] = None, model_dir: str = None,
    ):
        # Bypass OVModelVaeDecoder.__init__ and call the grandparent initializer
        # directly, so an externally loaded ov.Model can be attached to the pipeline.
        super(OVModelVaeDecoder, self).__init__(model, parent_model, ov_config, "vae_decoder", model_dir)


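# Load the pipeline without compiling so the VAE decoder can be swapped and the
# model reshaped to static dimensions before compilation.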
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, compile=False, ov_config={"CACHE_DIR": ""})

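# Swap in TAESD (a tiny distilled VAE decoder) for much faster image decoding
# at a small quality cost.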
taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
pipe.vae_decoder = CustomOVModelVaeDecoder(
    model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
    parent_model=pipe,
    model_dir=taesd_dir,
)

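# Reshape to static 512x512 dimensions (dynamic batch) and compile for the CPU.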
pipe.reshape(batch_size=batch_size, height=512, width=512, num_images_per_prompt=1)
pipe.compile()


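# Run a single 512x512 generation; LCM checkpoints need only a few inference
# steps and work best with guidance_scale around 1.0.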
def infer(prompt):
    # Append fixed quality tags to the user prompt; the negative prompt is hardcoded.
    image = pipe(
        prompt=prompt + ", score_8_up,score_7_up,score_6_up,score_9,score_8_up,score_7,masterpiece,best quality,source_anime,bangs,",
        negative_prompt="SimpleNegative_AnimeV1, SimpleNegativeV3, an12, rfneg, verybadimagenegative_v1.3, easynegative, text, watermark, censored, (mosaic censoring:1.3), teeth, (animal ears:2)",
        width=512,
        height=512,
        guidance_scale=1.0,
        num_inference_steps=8,
        num_images_per_prompt=1,
    ).images[0]
    return image


examples = [
    "A cute kitten, Japanese cartoon style.",
    "A sweet family, dad stands next to mom, mom holds baby girl.",
    "A delicious ceviche cheesecake slice",
]

css="""
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""


power_device = "CPU"

with gr.Blocks(css=css) as demo:
    
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Disty0/LCM_SoteMix 512x512
        Currently running on {power_device}.
        """)
        
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )         
            run_button = gr.Button("Run", scale=0)
        
        result = gr.Image(label="Result", show_label=False)

        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )

    run_button.click(
        fn=infer,
        inputs=[prompt],
        outputs=[result],
    )

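# Queue requests and start the Gradio server (port 7860 by default).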
demo.queue().launch()