import gradio as gr
from optimum.intel import OVStableDiffusionXLPipeline, OVLatentConsistencyModelPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker


#model_id = "echarlaix/sdxl-turbo-openvino-int8"
#model_id = "echarlaix/LCM_Dreamshaper_v7-openvino"
#model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"
model_id = "stabilityai/stable-diffusion-xl-base-1.0"

# Optional safety checker, currently disabled:
#safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")


# The commented lines load an LCM pipeline (optionally with the safety checker)
# deferred with compile=False; the active line loads and compiles the SDXL pipeline.
#pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
#pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)
pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id)

batch_size, num_images, height, width = 1, 1, 1024, 512
# Optional optimizations, currently disabled: reshaping to static input shapes
# speeds up OpenVINO inference, and textual-inversion embeddings (badhandv4,
# hiten1) can be loaded before compilation.
#pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
#pipeline.load_textual_inversion("./badhandv4.pt", "badhandv4")
#pipeline.load_textual_inversion("./hiten1.pt", "hiten1")
#pipeline.compile()
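# A minimal sketch of the static-shape flow hinted at above, assuming the same
# model_id and dimensions (kept commented so it does not change the running app):
#
#   pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id, compile=False)
#   pipeline.reshape(batch_size=batch_size, height=height, width=width,
#                    num_images_per_prompt=num_images)
#   pipeline.compile()  # compile once against fixed shapes instead of on the first call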

# Note: the LCM pipeline rejects this argument
# (TypeError: LatentConsistencyPipelineMixin.__call__() got an unexpected keyword argument 'negative_prompt'),
# so the hardcoded negative prompt below only applies to the SDXL pipeline.
negative_prompt = "easynegative, bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs, nsfw, nude, censored"
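# A hypothetical guard for switching back to an LCM model: forward negative_prompt
# only when the pipeline's __call__ accepts it. Kept commented; `extra_kwargs` is
# an illustrative name introduced here, not part of the original app.
#
#   import inspect
#   extra_kwargs = {}
#   if "negative_prompt" in inspect.signature(pipeline.__call__).parameters:
#       extra_kwargs["negative_prompt"] = negative_prompt
#   image = pipeline(prompt=prompt, num_inference_steps=num_inference_steps,
#                    **extra_kwargs).images[0]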

def infer(prompt, num_inference_steps):
    """Run the pipeline once and return the first generated image."""
    image = pipeline(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=7.0,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        num_images_per_prompt=num_images,
    ).images[0]
    return image

examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]

css="""
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""


with gr.Blocks(css=css) as demo:
    
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""
        # Demo: stabilityai/stable-diffusion-xl-base-1.0
        """)

        with gr.Row():
            
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            
            run_button = gr.Button("Run", scale=0)
        
        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            # Negative prompt input, disabled because the hardcoded
            # negative_prompt defined above is used instead:
            # negative_prompt = gr.Text(
            #     label="Negative prompt",
            #     max_lines=1,
            #     placeholder="Enter a negative prompt",
            # )
            
            with gr.Row():
                
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,  # raised from 10 so the default of 30 fits; SDXL base typically uses 20-50 steps
                    step=1,
                    value=30,
                )
        
        gr.Examples(
            examples=examples,
            inputs=[prompt]
        )

    run_button.click(
        fn=infer,
        inputs=[prompt, num_inference_steps],
        outputs=[result]
    )

demo.queue().launch(share=True)