Spaces: Building on A10G

Upload 27 files

- diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py +7 -7
- diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_depth.py +23 -8
- diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_hed.py +23 -8
- diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_mlsd.py +23 -7
- diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_pose.py +23 -9
- diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_scribble.py +22 -8
- diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_seg.py +18 -5
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py
CHANGED
@@ -8,7 +8,7 @@ from PIL import Image

 from diffusion_webui.utils.model_list import (
     controlnet_canny_model_list,
-
+    stable_inpiant_model_list,
 )
 from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
@@ -40,9 +40,9 @@ class StableDiffusionControlNetInpaintCannyGenerator:

         return self.pipe

-    def
+    def load_image(self, image_path):
         image = np.array(image_path)
-        image = Image.fromarray(
+        image = Image.fromarray(image_path)
         return image

     def controlnet_canny_inpaint(
@@ -77,8 +77,8 @@ class StableDiffusionControlNetInpaintCannyGenerator:
         normal_image = image_path["image"].convert("RGB").resize((512, 512))
         mask_image = image_path["mask"].convert("RGB").resize((512, 512))

-        normal_image = self.
-        mask_image = self.
+        normal_image = self.load_image(image_path=normal_image)
+        mask_image = self.load_image(image_path=mask_image)

         control_image = self.controlnet_canny_inpaint(image_path=image_path)
         pipe = self.load_model(
@@ -133,8 +133,8 @@ class StableDiffusionControlNetInpaintCannyGenerator:
                     with gr.Column():
                         controlnet_canny_inpaint_stable_model_id = (
                             gr.Dropdown(
-                                choices=
-                                value=
+                                choices=stable_inpiant_model_list,
+                                value=stable_inpiant_model_list[0],
                                 label="Stable Model Id",
                             )
                         )
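The load_image helper added here (and, identically, in every other module in this commit) is worth a second look: the np.array(image_path) result is immediately overwritten, and Image.fromarray is handed the original PIL image rather than the array. It still returns a usable image, because PIL images expose __array_interface__, so the helper effectively just copies its input. A minimal standalone check, written for this note rather than taken from the commit:

import numpy as np
from PIL import Image

def load_image(image_path):
    image = np.array(image_path)         # dead store in the committed code
    image = Image.fromarray(image_path)  # works: PIL images expose __array_interface__
    return image

src = Image.new("RGB", (512, 512), "white")
out = load_image(src)
assert isinstance(out, Image.Image) and out.size == (512, 512)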
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_depth.py
CHANGED
@@ -1,13 +1,14 @@
 import gradio as gr
 import numpy as np
 import torch
-from diffusers import ControlNetModel
+from diffusers import ControlNetModel
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
 from PIL import Image
 from transformers import pipeline

 from diffusion_webui.utils.model_list import (
     controlnet_depth_model_list,
-
+    stable_inpiant_model_list,
 )
 from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
@@ -26,7 +27,7 @@ class StableDiffusionControlInpaintNetDepthGenerator:
         controlnet = ControlNetModel.from_pretrained(
             controlnet_model_path, torch_dtype=torch.float16
         )
-        self.pipe =
+        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
             pretrained_model_name_or_path=stable_model_path,
             controlnet=controlnet,
             safety_checker=None,
@@ -39,6 +40,12 @@ class StableDiffusionControlInpaintNetDepthGenerator:

         return self.pipe

+    def load_image(self, image_path):
+        image = np.array(image_path)
+        image = Image.fromarray(image_path)
+        return image
+
+
     def controlnet_inpaint_depth(self, image_path: str):
         depth_estimator = pipeline("depth-estimation")
         image = image_path["image"].convert("RGB").resize((512, 512))
@@ -64,8 +71,13 @@ class StableDiffusionControlInpaintNetDepthGenerator:
         scheduler: str,
         seed_generator: int,
     ):
-
-
+        normal_image = image_path["image"].convert("RGB").resize((512, 512))
+        mask_image = image_path["mask"].convert("RGB").resize((512, 512))
+
+        normal_image = self.load_image(image_path=normal_image)
+        mask_image = self.load_image(image_path=mask_image)
+
+        control_image = self.controlnet_inpaint_depth(image_path=image_path)

         pipe = self.load_model(
             stable_model_path=stable_model_path,
@@ -81,7 +93,10 @@ class StableDiffusionControlInpaintNetDepthGenerator:

         output = pipe(
             prompt=prompt,
-
+
+            image=normal_image,
+            mask_image=mask_image,
+            control_image=control_image,
             negative_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             num_inference_steps=num_inference_step,
@@ -117,8 +132,8 @@ class StableDiffusionControlInpaintNetDepthGenerator:
                     with gr.Column():
                         controlnet_depth_inpaint_stable_model_id = (
                             gr.Dropdown(
-                                choices=
-                                value=
+                                choices=stable_inpiant_model_list,
+                                value=stable_inpiant_model_list[0],
                                 label="Stable Model Id",
                             )
                         )
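Every module now constructs the community inpaint pipeline referenced by the # https://github.com/mikonvergence/ControlNetInpaint comment in these files, vendored in this commit as pipeline_stable_diffusion_controlnet_inpaint.py. A sketch of the loading pattern the diffs converge on; the two model ids are illustrative stand-ins for entries of controlnet_depth_model_list and stable_inpiant_model_list:

import torch
from diffusers import ControlNetModel

# Vendored pipeline shipped alongside these modules in the same commit.
from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
    StableDiffusionControlNetInpaintPipeline,
)

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16  # illustrative id
)
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    pretrained_model_name_or_path="runwayml/stable-diffusion-inpainting",  # illustrative id
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
)
pipe.to("cuda")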
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_hed.py
CHANGED
@@ -2,16 +2,18 @@ import gradio as gr
 import numpy as np
 import torch
 from controlnet_aux import HEDdetector
-from diffusers import ControlNetModel
+from diffusers import ControlNetModel
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

 from diffusion_webui.utils.model_list import (
     controlnet_hed_model_list,
-
+    stable_inpiant_model_list,
 )
 from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
     get_scheduler_list,
 )
+from PIL import Image

 # https://github.com/mikonvergence/ControlNetInpaint

@@ -25,7 +27,7 @@ class StableDiffusionControlNetInpaintHedGenerator:
         controlnet = ControlNetModel.from_pretrained(
             controlnet_model_path, torch_dtype=torch.float16
         )
-        self.pipe =
+        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
             pretrained_model_name_or_path=stable_model_path,
             controlnet=controlnet,
             safety_checker=None,
@@ -38,6 +40,12 @@ class StableDiffusionControlNetInpaintHedGenerator:

         return self.pipe

+    def load_image(self, image_path):
+        image = np.array(image_path)
+        image = Image.fromarray(image_path)
+        return image
+
+
     def controlnet_inpaint_hed(self, image_path: str):
         hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
         image = image_path["image"].convert("RGB").resize((512, 512))
@@ -60,8 +68,13 @@ class StableDiffusionControlNetInpaintHedGenerator:
         scheduler: str,
         seed_generator: int,
     ):
-
-
+        normal_image = image_path["image"].convert("RGB").resize((512, 512))
+        mask_image = image_path["mask"].convert("RGB").resize((512, 512))
+
+        normal_image = self.load_image(image_path=normal_image)
+        mask_image = self.load_image(image_path=mask_image)
+
+        control_image = self.controlnet_inpaint_hed(image_path=image_path)

         pipe = self.load_model(
             stable_model_path=stable_model_path,
@@ -77,7 +90,9 @@ class StableDiffusionControlNetInpaintHedGenerator:

         output = pipe(
             prompt=prompt,
-            image=
+            image=normal_image,
+            mask_image=mask_image,
+            control_image=control_image,
             negative_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             num_inference_steps=num_inference_step,
@@ -113,8 +128,8 @@ class StableDiffusionControlNetInpaintHedGenerator:
                     with gr.Column():
                         controlnet_hed_inpaint_stable_model_id = (
                             gr.Dropdown(
-                                choices=
-                                value=
+                                choices=stable_inpiant_model_list,
+                                value=stable_inpiant_model_list[0],
                                 label="Stable Model Id",
                             )
                         )
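The image_path["image"] / image_path["mask"] accesses throughout these generators assume the dict that a Gradio 3.x image component produces when its sketch tool is enabled. A small sketch of that input contract; the component wiring is illustrative, not part of this diff:

import gradio as gr

# With tool="sketch" and type="pil", Gradio 3.x delivers the component's
# value to the handler as {"image": <PIL.Image>, "mask": <PIL.Image>}.
inpaint_input = gr.Image(source="upload", tool="sketch", type="pil")

def handler(image_path):
    normal_image = image_path["image"].convert("RGB").resize((512, 512))
    mask_image = image_path["mask"].convert("RGB").resize((512, 512))
    return normal_image, mask_image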
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_mlsd.py
CHANGED
@@ -2,16 +2,18 @@ import gradio as gr
 import numpy as np
 import torch
 from controlnet_aux import MLSDdetector
-from diffusers import ControlNetModel
+from diffusers import ControlNetModel
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

 from diffusion_webui.utils.model_list import (
     controlnet_mlsd_model_list,
-
+    stable_inpiant_model_list,
 )
 from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
     get_scheduler_list,
 )
+from PIL import Image

 # https://github.com/mikonvergence/ControlNetInpaint

@@ -25,7 +27,7 @@ class StableDiffusionControlNetInpaintMlsdGenerator:
         controlnet = ControlNetModel.from_pretrained(
             controlnet_model_path, torch_dtype=torch.float16
         )
-        self.pipe =
+        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
             pretrained_model_name_or_path=stable_model_path,
             controlnet=controlnet,
             safety_checker=None,
@@ -37,6 +39,12 @@ class StableDiffusionControlNetInpaintMlsdGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()

         return self.pipe
+
+    def load_image(self, image_path):
+        image = np.array(image_path)
+        image = Image.fromarray(image_path)
+        return image
+

     def controlnet_inpaint_mlsd(self, image_path: str):
         mlsd = MLSDdetector.from_pretrained("lllyasviel/ControlNet")
@@ -61,7 +69,13 @@ class StableDiffusionControlNetInpaintMlsdGenerator:
         seed_generator: int,
     ):

-
+        normal_image = image_path["image"].convert("RGB").resize((512, 512))
+        mask_image = image_path["mask"].convert("RGB").resize((512, 512))
+
+        normal_image = self.load_image(image_path=normal_image)
+        mask_image = self.load_image(image_path=mask_image)
+
+        control_image = self.controlnet_inpaint_mlsd(image_path=image_path)

         pipe = self.load_model(
             stable_model_path=stable_model_path,
@@ -77,7 +91,9 @@ class StableDiffusionControlNetInpaintMlsdGenerator:

         output = pipe(
             prompt=prompt,
-            image=
+            image=normal_image,
+            mask_image=mask_image,
+            control_image=control_image,
             negative_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             num_inference_steps=num_inference_step,
@@ -113,8 +129,8 @@ class StableDiffusionControlNetInpaintMlsdGenerator:
                     with gr.Column():
                         controlnet_mlsd_inpaint_stable_model_id = (
                             gr.Dropdown(
-                                choices=
-                                value=
+                                choices=stable_inpiant_model_list,
+                                value=stable_inpiant_model_list[0],
                                 label="Stable Model Id",
                             )
                         )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_pose.py
CHANGED
@@ -2,11 +2,13 @@ import gradio as gr
 import numpy as np
 import torch
 from controlnet_aux import OpenposeDetector
-from diffusers import ControlNetModel
+from diffusers import ControlNetModel
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
+from PIL import Image

 from diffusion_webui.utils.model_list import (
     controlnet_pose_model_list,
-
+    stable_inpiant_model_list,
 )
 from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
@@ -26,7 +28,7 @@ class StableDiffusionControlNetInpaintPoseGenerator:
             controlnet_model_path, torch_dtype=torch.float16
         )

-        self.pipe =
+        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
             pretrained_model_name_or_path=stable_model_path,
             controlnet=controlnet,
             safety_checker=None,
@@ -38,7 +40,12 @@ class StableDiffusionControlNetInpaintPoseGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()

         return self.pipe
-
+
+    def load_image(self, image_path):
+        image = np.array(image_path)
+        image = Image.fromarray(image_path)
+        return image
+
     def controlnet_pose_inpaint(self, image_path: str):
         openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")

@@ -62,8 +69,13 @@ class StableDiffusionControlNetInpaintPoseGenerator:
         scheduler: str,
         seed_generator: int,
     ):
-
-
+        normal_image = image_path["image"].convert("RGB").resize((512, 512))
+        mask_image = image_path["mask"].convert("RGB").resize((512, 512))
+
+        normal_image = self.load_image(image_path=normal_image)
+        mask_image = self.load_image(image_path=mask_image)
+
+        controlnet_image = self.controlnet_pose_inpaint(image_path=image_path)

         pipe = self.load_model(
             stable_model_path=stable_model_path,
@@ -79,7 +91,9 @@ class StableDiffusionControlNetInpaintPoseGenerator:

         output = pipe(
             prompt=prompt,
-            image=
+            image=normal_image,
+            mask_image=mask_image,
+            controlnet_image=controlnet_image,
             negative_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             num_inference_steps=num_inference_step,
@@ -115,8 +129,8 @@ class StableDiffusionControlNetInpaintPoseGenerator:
                     with gr.Column():
                         controlnet_pose_inpaint_stable_model_id = (
                             gr.Dropdown(
-                                choices=
-                                value=
+                                choices=stable_inpiant_model_list,
+                                value=stable_inpiant_model_list[0],
                                 label="Stable Model Id",
                             )
                         )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_scribble.py
CHANGED
@@ -2,11 +2,13 @@ import gradio as gr
 import numpy as np
 import torch
 from controlnet_aux import HEDdetector
-from diffusers import ControlNetModel
+from diffusers import ControlNetModel
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
+from PIL import Image

 from diffusion_webui.utils.model_list import (
     controlnet_scribble_model_list,
-
+    stable_inpiant_model_list,
 )
 from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
@@ -25,7 +27,7 @@ class StableDiffusionControlNetInpaintScribbleGenerator:
         controlnet_model_path, torch_dtype=torch.float16
         )

-        self.pipe =
+        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
             pretrained_model_name_or_path=stable_model_path,
             controlnet=controlnet,
             safety_checker=None,
@@ -37,6 +39,11 @@ class StableDiffusionControlNetInpaintScribbleGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()

         return self.pipe
+
+    def load_image(self, image_path):
+        image = np.array(image_path)
+        image = Image.fromarray(image_path)
+        return image

     def controlnet_inpaint_scribble(self, image_path: str):
         hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
@@ -61,8 +68,13 @@ class StableDiffusionControlNetInpaintScribbleGenerator:
         scheduler: str,
         seed_generator: int,
     ):
-
-
+        normal_image = image_path["image"].convert("RGB").resize((512, 512))
+        mask_image = image_path["mask"].convert("RGB").resize((512, 512))
+
+        normal_image = self.load_image(image_path=normal_image)
+        mask_image = self.load_image(image_path=mask_image)
+
+        controlnet_image = self.controlnet_inpaint_scribble(image_path=image_path)

         pipe = self.load_model(
             stable_model_path=stable_model_path,
@@ -78,7 +90,9 @@ class StableDiffusionControlNetInpaintScribbleGenerator:

         output = pipe(
             prompt=prompt,
-            image=
+            image=normal_image,
+            mask_image=mask_image,
+            controlnet_image=controlnet_image,
             negative_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             num_inference_steps=num_inference_step,
@@ -114,8 +128,8 @@ class StableDiffusionControlNetInpaintScribbleGenerator:
                     with gr.Column():
                         controlnet_scribble_inpaint_stable_model_id = (
                             gr.Dropdown(
-                                choices=
-                                value=
+                                choices=stable_inpiant_model_list,
+                                value=stable_inpiant_model_list[0],
                                 label="Stable Model Id",
                             )
                         )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_seg.py
CHANGED
@@ -7,7 +7,7 @@ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

 from diffusion_webui.utils.model_list import (
     controlnet_seg_model_list,
-
+    stable_inpiant_model_list,
 )
 from diffusion_webui.utils.scheduler_list import (
     SCHEDULER_LIST,
@@ -200,6 +200,11 @@ class StableDiffusionControlNetInpaintSegGenerator:
         self.pipe.enable_xformers_memory_efficient_attention()

         return self.pipe
+
+    def load_image(self, image_path):
+        image = np.array(image_path)
+        image = Image.fromarray(image_path)
+        return image

     def controlnet_seg_inpaint(self, image_path: str):
         image_processor = AutoImageProcessor.from_pretrained(
@@ -246,7 +251,13 @@ class StableDiffusionControlNetInpaintSegGenerator:
         seed_generator: int,
     ):

-
+        normal_image = image_path["image"].convert("RGB").resize((512, 512))
+        mask_image = image_path["mask"].convert("RGB").resize((512, 512))
+
+        normal_image = self.load_image(image_path=normal_image)
+        mask_image = self.load_image(image_path=mask_image)
+
+        controlnet_image = self.controlnet_seg_inpaint(image_path=image_path)

         pipe = self.load_model(
             stable_model_path=stable_model_path,
@@ -262,7 +273,9 @@ class StableDiffusionControlNetInpaintSegGenerator:

         output = pipe(
             prompt=prompt,
-            image=
+            image=normal_image,
+            mask_image=mask_image,
+            controlnet_image=controlnet_image,
             negative_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             num_inference_steps=num_inference_step,
@@ -298,8 +311,8 @@ class StableDiffusionControlNetInpaintSegGenerator:
                     with gr.Column():
                         controlnet_seg_inpaint_stable_model_id = (
                             gr.Dropdown(
-                                choices=
-                                value=
+                                choices=stable_inpiant_model_list,
+                                value=stable_inpiant_model_list[0],
                                 label="Stable Model Id",
                             )
                         )
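One inconsistency worth flagging across the seven files: the canny, depth, hed, and mlsd modules pass the conditioning image to the pipeline as control_image=, while the pose, scribble, and seg modules pass controlnet_image=. Both spellings cannot match the vendored pipeline's actual parameter name, so unless that pipeline silently accepts unknown keywords, one group of modules will fail with a TypeError at generation time. A sketch of the shared call shape, using the control_image spelling from the canny/depth/hed/mlsd modules:

def run_inpaint(pipe, prompt, negative_prompt, normal_image, mask_image,
                conditioning_image, num_images_per_prompt, num_inference_step):
    # Keyword names mirror the diffs above; whether the vendored pipeline
    # expects control_image or controlnet_image must be checked against
    # pipeline_stable_diffusion_controlnet_inpaint.py itself.
    return pipe(
        prompt=prompt,
        image=normal_image,
        mask_image=mask_image,
        control_image=conditioning_image,
        negative_prompt=negative_prompt,
        num_images_per_prompt=num_images_per_prompt,
        num_inference_steps=num_inference_step,
    )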