Spaces: Runtime error

Update app.py

app.py CHANGED
```diff
@@ -1,39 +1,43 @@
 import gradio as gr
 import numpy as np
-import random
+from optimum.intel import OVStableDiffusionPipeline, OVStableDiffusionXLPipeline, OVLatentConsistencyModelPipeline
+from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
 from diffusers import DiffusionPipeline
-import torch
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
 
-if torch.cuda.is_available():
-    torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-    pipe.enable_xformers_memory_efficient_attention()
-    pipe = pipe.to(device)
-else:
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-    pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
+#model_id = "echarlaix/sdxl-turbo-openvino-int8"
+#model_id = "echarlaix/LCM_Dreamshaper_v7-openvino"
+#model_id = "OpenVINO/LCM_Dreamshaper_v7-int8-ov"
+model_id = "stabilityai/stable-diffusion-xl-base-1.0"
+
+#safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
+
+#pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False, safety_checker=safety_checker)
+#pipeline = OVLatentConsistencyModelPipeline.from_pretrained(model_id, compile=False)
+pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id, compile=False, export=True)
+
+batch_size, num_images, height, width = 1, 1, 1024, 512
+pipeline.reshape(batch_size=batch_size, height=height, width=width, num_images_per_prompt=num_images)
+#pipeline.load_textual_inversion("./badhandv4.pt", "badhandv4")
+#hiten1
+pipeline.load_textual_inversion("./hiten1.pt", "hiten1")
+pipeline.compile()
+
+#TypeError: LatentConsistencyPipelineMixin.__call__() got an unexpected keyword argument 'negative_prompt'
+negative_prompt="easynegative,bad anatomy, bad hands, missing fingers, extra fingers, three hands, three legs, bad arms, missing legs, missing arms, poorly drawn face, bad face, fused face, cloned face, three crus, fused feet, fused thigh, extra crus, ugly fingers, horn, cartoon, cg, 3d, unreal, animate, amputation, disconnected limbs, nsfw, nude, censored, "
+
+def infer(prompt, num_inference_steps):
+
+    image = pipeline(
         prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
+        #negative_prompt = negative_prompt,
+        guidance_scale = 7.0,
         num_inference_steps = num_inference_steps,
-        width = width,
+        width = width,
         height = height,
-        generator = generator
-    ).images[0]
+        num_images_per_prompt=num_images,
+    ).images[0]
 
     return image
 
@@ -50,19 +54,14 @@ css="""
 }
 """
 
-if torch.cuda.is_available():
-    power_device = "GPU"
-else:
-    power_device = "CPU"
 
 with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""
-        # Text-to-Image Gradio Template
-        Currently running on {power_device}.
+        # Demo : [Fast LCM](https://huggingface.co/OpenVINO/LCM_Dreamshaper_v7-int8-ov) quantized with NNCF ⚡
         """)
 
     with gr.Row():
 
         prompt = gr.Text(
@@ -78,58 +77,21 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+            #with gr.Row():
+            #    negative_prompt = gr.Text(
+            #        label="Negative prompt",
+            #        max_lines=1,
+            #        placeholder="Enter a negative prompt",
+            #    )
 
             with gr.Row():
 
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-
-            with gr.Row():
-
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,
-                )
-
                 num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
-                    maximum=12,
+                    maximum=10,
                     step=1,
-                    value=2,
+                    value=30,
                 )
 
         gr.Examples(
@@ -139,8 +101,8 @@ with gr.Blocks(css=css) as demo:
 
     run_button.click(
        fn = infer,
-        inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        inputs = [prompt, num_inference_steps],
        outputs = [result]
    )
 
-demo.queue().launch()
+demo.queue().launch(share=True)
```
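For reference, the rewritten app.py follows the standard Optimum-Intel OpenVINO flow: export the PyTorch weights to OpenVINO IR, freeze static input shapes, then compile before serving requests. Below is a minimal standalone sketch of that flow, assuming `optimum[openvino]` is installed; the model ID and the 512×1024 shape come from the diff, while the prompt and output filename are illustrative only.

```python
# Minimal sketch of the OpenVINO flow in the new app.py (not the Space's exact code).
# Assumes: pip install "optimum[openvino]"
from optimum.intel import OVStableDiffusionXLPipeline

model_id = "stabilityai/stable-diffusion-xl-base-1.0"

# export=True converts the PyTorch checkpoint to OpenVINO IR on the fly;
# compile=False defers compilation until the input shapes are fixed.
pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id, export=True, compile=False)

# Freeze static shapes, then compile: OpenVINO can optimize the graph for
# exactly these dimensions, but every later call must match them.
pipeline.reshape(batch_size=1, height=1024, width=512, num_images_per_prompt=1)
pipeline.compile()

image = pipeline(
    prompt="a photo of an astronaut riding a horse",  # illustrative prompt
    guidance_scale=7.0,
    num_inference_steps=30,
    width=512,
    height=1024,
).images[0]
image.save("out.png")
```

Freezing shapes up front is also why the diff drops the width/height sliders from the UI: a statically reshaped pipeline cannot vary those dimensions per request, and guidance scale is simply hardcoded to 7.0. (One apparent oversight in the new UI: the steps slider declares maximum=10 but a default value=30, which lies outside the slider's range.)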
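One quirk the diff works around: the LCM pipeline variants named in the comments reject `negative_prompt`, hence the `TypeError` note and the commented-out argument. A hypothetical guard like the one below, which is not part of the Space's code, would let the same `infer` body serve both pipeline classes:

```python
import inspect

def call_kwargs(pipeline, prompt, negative_prompt=None, **extra):
    """Hypothetical helper: include negative_prompt only if the pipeline's
    __call__ accepts it (OVLatentConsistencyModelPipeline does not)."""
    kwargs = dict(prompt=prompt, **extra)
    accepts = "negative_prompt" in inspect.signature(pipeline.__call__).parameters
    if negative_prompt is not None and accepts:
        kwargs["negative_prompt"] = negative_prompt
    return kwargs

# Usage inside infer():
#   image = pipeline(**call_kwargs(pipeline, prompt,
#                                  negative_prompt=negative_prompt,
#                                  num_inference_steps=num_inference_steps)).images[0]
```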