Commit 0508515 · 1 Parent(s): be68220
Move ControlNet up so pipe available for concept population

app.py CHANGED
@@ -44,8 +44,9 @@ my_token = os.environ['api_key']
 
 pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", revision="fp16", torch_dtype=torch.float16, use_auth_token=my_token).to("cuda")
 
+
 def check_prompt(prompt):
-    SPAM_WORDS = ['Π', '
+    SPAM_WORDS = ['Π', 'oob', 'reast'] # only necessary to limit spam
     for spam_word in SPAM_WORDS:
         if spam_word in prompt:
             return False
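Note on the hunk above: check_prompt does plain substring matching, so the fragments 'oob' and 'reast' also catch longer words that contain them. The rest of the function sits outside this hunk; a minimal sketch of the presumed full shape (the final return True and the call site are assumptions, not shown in the commit):

def check_prompt(prompt):
    SPAM_WORDS = ['Π', 'oob', 'reast']  # substring fragments, deliberately partial
    for spam_word in SPAM_WORDS:
        if spam_word in prompt:
            return False  # flagged: refuse to generate
    return True  # assumed fall-through for clean prompts

# hypothetical call site:
# if not check_prompt(user_prompt):
#     raise gr.Error("prompt rejected")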
@@ -81,6 +82,77 @@ def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, tok
     return token
 
 
+
+# ----- ControlNet Canny Edges -----------------------------------------------------------------
+
+import gradio as gr
+from PIL import Image
+import numpy as np
+import cv2
+
+from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+from diffusers import UniPCMultistepScheduler
+import torch
+
+controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+controlnet_pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+
+controlnet_pipe.scheduler = UniPCMultistepScheduler.from_config(controlnet_pipe.scheduler.config)
+controlnet_pipe.enable_model_cpu_offload()
+controlnet_pipe.enable_xformers_memory_efficient_attention()
+
+def controlnet_edges(canny_input_prompt, input_image, input_low_threshold, input_high_threshold, input_invert):
+    np_image = np.array(input_image)
+
+    output_image = input_image
+    numpy_image = np.array(output_image)
+
+    low_threshold = 80
+    high_threshold = 100
+    canny_1 = cv2.Canny(numpy_image, input_low_threshold, input_high_threshold)
+    canny_1 = canny_1[:, :, None]
+    canny_1 = np.concatenate([canny_1, canny_1, canny_1], axis=2)
+    if input_invert:
+        canny_1 = 255 - canny_1
+
+    canny_2 = Image.fromarray(canny_1)
+
+    prompt = canny_input_prompt
+    generator = torch.Generator(device="cpu").manual_seed(2)
+
+    output_image = controlnet_pipe(
+        prompt,
+        canny_2,
+        negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
+        generator=generator,
+        num_inference_steps=20,
+    )
+
+    return output_image[0][0]
+
+
+canny_input_prompt = gr.inputs.Textbox(label="Enter a single word or phrase")
+canny_input_image = gr.inputs.Image()
+canny_input_low_threshold = gr.inputs.Slider(minimum=0, maximum=1000, step=1, label="Lower Threshold:", default=100)
+canny_input_high_threshold = gr.inputs.Slider(minimum=0, maximum=1000, step=1, label="Upper Threshold:", default=200)
+canny_input_invert = gr.inputs.Checkbox(label="Invert Image")
+canny_outputs = gr.outputs.Image(type="pil")
+
+# make and launch the gradio app...
+controlnet_canny_interface = gr.Interface(fn=controlnet_edges, inputs=[canny_input_prompt, canny_input_image, canny_input_low_threshold, canny_input_high_threshold, canny_input_invert], outputs=canny_outputs, title='Canny Edge Tracing', allow_flagging='never')
+# controlnet_canny_interface.launch()
+
+
+
+
+
+
+
+# ----- Load All models / concepts -----------------------------------------------------------------
+
+
 ahx_model_list = [model for model in models_list if "ahx" in model.modelId]
 ahx_dropdown_list = [model for model in models_list if "ahx-model" in model.modelId]
 
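The body of controlnet_edges follows the standard ControlNet canny recipe: compute a single-channel Canny edge map, replicate it to three channels, optionally invert, and wrap it back into a PIL image. (The hard-coded low_threshold/high_threshold locals in the hunk are dead code; the slider values are what actually reach cv2.Canny.) As a standalone sketch with illustrative names:

import cv2
import numpy as np
from PIL import Image

def make_canny_control_image(image, low, high, invert=False):
    # single-channel uint8 edge map from the RGB input
    edges = cv2.Canny(np.array(image), low, high)
    # replicate to the 3 channels the ControlNet conditioning expects
    edges = np.stack([edges, edges, edges], axis=2)
    if invert:
        edges = 255 - edges  # white background, black edges
    return Image.fromarray(edges)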
@@ -119,6 +191,7 @@ for model in ahx_model_list:
     #if token cannot be loaded, skip it
     try:
         learned_token = load_learned_embed_in_clip(f"{model_id}/learned_embeds.bin", pipe.text_encoder, pipe.tokenizer, token_name)
+        # _learned_token_controlnet = load_learned_embed_in_clip(f"{model_id}/learned_embeds.bin", controlnet_pipe.text_encoder, controlnet_pipe.tokenizer, token_name)
     except:
         continue
     model_content["token"] = learned_token
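The commented-out _learned_token_controlnet line is what this commit's reordering enables: with controlnet_pipe now constructed before this loop runs, the same learned_embeds.bin could be registered in both pipelines. If it were activated, the loop body would presumably read (a sketch; the broad except mirrors the original's skip-on-failure intent):

try:
    learned_token = load_learned_embed_in_clip(
        f"{model_id}/learned_embeds.bin", pipe.text_encoder, pipe.tokenizer, token_name
    )
    # register the same concept in the ControlNet pipeline as well
    load_learned_embed_in_clip(
        f"{model_id}/learned_embeds.bin", controlnet_pipe.text_encoder, controlnet_pipe.tokenizer, token_name
    )
except Exception:  # if either token cannot be loaded, skip this model
    continue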
@@ -412,69 +485,6 @@ canny_interface = gr.Interface(fn=canny_process_image, inputs=[canny_input_image
 
 
 
-# ----- ControlNet Canny Edges -----------------------------------------------------------------
-
-# import gradio as gr
-# from PIL import Image
-# import numpy as np
-# import cv2
-
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
-from diffusers import UniPCMultistepScheduler
-import torch
-
-controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
-controlnet_pipe = StableDiffusionControlNetPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
-)
-
-controlnet_pipe.scheduler = UniPCMultistepScheduler.from_config(controlnet_pipe.scheduler.config)
-controlnet_pipe.enable_model_cpu_offload()
-controlnet_pipe.enable_xformers_memory_efficient_attention()
-
-def controlnet_edges(canny_input_prompt, input_image, input_low_threshold, input_high_threshold, input_invert):
-    np_image = np.array(input_image)
-
-    output_image = input_image
-    numpy_image = np.array(output_image)
-
-    low_threshold = 80
-    high_threshold = 100
-    canny_1 = cv2.Canny(numpy_image, input_low_threshold, input_high_threshold)
-    canny_1 = canny_1[:, :, None]
-    canny_1 = np.concatenate([canny_1, canny_1, canny_1], axis=2)
-    if input_invert:
-        canny_1 = 255 - canny_1
-
-    canny_2 = Image.fromarray(canny_1)
-
-    prompt = canny_input_prompt
-    generator = torch.Generator(device="cpu").manual_seed(2)
-
-    output_image = controlnet_pipe(
-        prompt,
-        canny_2,
-        negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
-        generator=generator,
-        num_inference_steps=20,
-    )
-
-    return output_image[0][0]
-
-
-canny_input_prompt = gr.inputs.Textbox(label="Enter a single word or phrase")
-canny_input_image = gr.inputs.Image()
-canny_input_low_threshold = gr.inputs.Slider(minimum=0, maximum=1000, step=1, label="Lower Threshold:", default=100)
-canny_input_high_threshold = gr.inputs.Slider(minimum=0, maximum=1000, step=1, label="Upper Threshold:", default=200)
-canny_input_invert = gr.inputs.Checkbox(label="Invert Image")
-canny_outputs = gr.outputs.Image(type="pil")
-
-# make and launch the gradio app...
-controlnet_canny_interface = gr.Interface(fn=controlnet_edges, inputs=[canny_input_prompt, canny_input_image, canny_input_low_threshold, canny_input_high_threshold, canny_input_invert], outputs=canny_outputs, title='Canny Edge Tracing', allow_flagging='never')
-# controlnet_canny_interface.launch()
-
-
-
 
 # ----- Launch Tabs -----------------------------------------------------------------
 
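Net effect of the commit: the ControlNet block is deleted here and re-inserted before the concept-population loop, unchanged apart from the four helper imports being uncommented and a new "Load All models / concepts" section header. The motivation is plain top-to-bottom module execution; a minimal runnable illustration (hypothetical names):

def populate_concepts(pipeline):
    print(f"populating concepts into {pipeline}")

try:
    populate_concepts(controlnet_pipe)  # before the commit: used before defined
except NameError as err:
    print(err)  # name 'controlnet_pipe' is not defined

controlnet_pipe = "stand-in for StableDiffusionControlNetPipeline"
populate_concepts(controlnet_pipe)  # after the commit: defined before use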