kadirnar committed
Commit c1739c6 · 1 Parent(s): dec6071
diffusion_webui/__init__.py DELETED
@@ -1,17 +0,0 @@
- from diffusion_webui.diffusion_models.controlnet_inpaint_pipeline import (
-     StableDiffusionControlNetInpaintGenerator,
- )
- from diffusion_webui.diffusion_models.controlnet_pipeline import (
-     StableDiffusionControlNetGenerator,
- )
- from diffusion_webui.diffusion_models.img2img_app import (
-     StableDiffusionImage2ImageGenerator,
- )
- from diffusion_webui.diffusion_models.inpaint_app import (
-     StableDiffusionInpaintGenerator,
- )
- from diffusion_webui.diffusion_models.text2img_app import (
-     StableDiffusionText2ImageGenerator,
- )
-
- __version__ = "2.5.0"
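For context, these package-level re-exports let downstream code import every generator from the top of the package. A minimal sketch of how a consumer would have used it (hypothetical snippet, assuming the package layout above was still installed):

import diffusion_webui

print(diffusion_webui.__version__)  # "2.5.0"

# Each generator bundles model loading, generation, and a Gradio UI builder.
generator = diffusion_webui.StableDiffusionText2ImageGenerator()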
diffusion_webui/diffusion_models/__init__.py DELETED
File without changes
diffusion_webui/diffusion_models/base_controlnet_pipeline.py DELETED
@@ -1,31 +0,0 @@
- class ControlnetPipeline:
-     def __init__(self):
-         self.pipe = None
-
-     def load_model(self, stable_model_path: str, controlnet_model_path: str):
-         raise NotImplementedError()
-
-     def load_image(self, image_path: str):
-         raise NotImplementedError()
-
-     def controlnet_preprocces(self, read_image: str):
-         raise NotImplementedError()
-
-     def generate_image(
-         self,
-         image_path: str,
-         stable_model_path: str,
-         controlnet_model_path: str,
-         prompt: str,
-         negative_prompt: str,
-         num_images_per_prompt: int,
-         guidance_scale: int,
-         num_inference_step: int,
-         controlnet_conditioning_scale: int,
-         scheduler: str,
-         seed_generator: int,
-     ):
-         raise NotImplementedError()
-
-     def web_interface():
-         raise NotImplementedError()
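As a sketch of the contract this base class defined, a minimal concrete subclass (hypothetical names, not from the repo) only has to fill in the abstract hooks:

# Hypothetical minimal subclass illustrating the ControlnetPipeline contract.
class DummyControlnetPipeline(ControlnetPipeline):
    def load_model(self, stable_model_path: str, controlnet_model_path: str):
        # A real implementation builds a diffusers pipeline here.
        self.pipe = object()
        return self.pipe

    def load_image(self, image_path: str):
        return image_path  # placeholder: real code returns a PIL image

    def controlnet_preprocces(self, read_image: str):
        return read_image  # placeholder: real code runs an annotator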
diffusion_webui/diffusion_models/controlnet_inpaint_pipeline.py DELETED
@@ -1,258 +0,0 @@
- import gradio as gr
- import numpy as np
- import torch
- from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline
- from PIL import Image
-
- from diffusion_webui.diffusion_models.base_controlnet_pipeline import (
-     ControlnetPipeline,
- )
- from diffusion_webui.utils.model_list import (
-     controlnet_model_list,
-     stable_model_list,
- )
- from diffusion_webui.utils.preprocces_utils import PREPROCCES_DICT
- from diffusion_webui.utils.scheduler_list import (
-     SCHEDULER_MAPPING,
-     get_scheduler,
- )
-
-
- class StableDiffusionControlNetInpaintGenerator(ControlnetPipeline):
-     def __init__(self):
-         super().__init__()
-
-     def load_model(self, stable_model_path, controlnet_model_path, scheduler):
-         if self.pipe is None or self.pipe.model_name != stable_model_path or self.pipe.scheduler_name != scheduler:
-             controlnet = ControlNetModel.from_pretrained(
-                 controlnet_model_path, torch_dtype=torch.float16
-             )
-             self.pipe = (
-                 StableDiffusionControlNetInpaintPipeline.from_pretrained(
-                     pretrained_model_name_or_path=stable_model_path,
-                     controlnet=controlnet,
-                     safety_checker=None,
-                     torch_dtype=torch.float16,
-                 )
-             )
-
-         self.pipe.model_name = stable_model_path
-         self.pipe.scheduler_name = scheduler
-         self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
-         self.pipe.to("cuda")
-         self.pipe.enable_xformers_memory_efficient_attention()
-
-         return self.pipe
-
-     def load_image(self, image):
-         image = np.array(image)
-         image = Image.fromarray(image)
-         return image
-
-     def controlnet_preprocces(
-         self,
-         read_image: str,
-         preprocces_type: str,
-     ):
-         processed_image = PREPROCCES_DICT[preprocces_type](read_image)
-         return processed_image
-
-     def generate_image(
-         self,
-         image_path: str,
-         stable_model_path: str,
-         controlnet_model_path: str,
-         prompt: str,
-         negative_prompt: str,
-         num_images_per_prompt: int,
-         height: int,
-         width: int,
-         strength: int,
-         guess_mode: bool,
-         guidance_scale: int,
-         num_inference_step: int,
-         controlnet_conditioning_scale: int,
-         scheduler: str,
-         seed_generator: int,
-         preprocces_type: str,
-     ):
-         normal_image = image_path["image"].convert("RGB").resize((512, 512))
-         mask_image = image_path["mask"].convert("RGB").resize((512, 512))
-
-         normal_image = self.load_image(image=normal_image)
-         mask_image = self.load_image(image=mask_image)
-
-         control_image = self.controlnet_preprocces(
-             read_image=normal_image, preprocces_type=preprocces_type
-         )
-         pipe = self.load_model(
-             stable_model_path=stable_model_path,
-             controlnet_model_path=controlnet_model_path,
-             scheduler=scheduler,
-         )
-
-         if seed_generator == 0:
-             random_seed = torch.randint(0, 1000000, (1,))
-             generator = torch.manual_seed(random_seed)
-         else:
-             generator = torch.manual_seed(seed_generator)
-
-         output = pipe(
-             prompt=prompt,
-             image=normal_image,
-             height=height,
-             width=width,
-             mask_image=mask_image,
-             strength=strength,
-             guess_mode=guess_mode,
-             control_image=control_image,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             num_inference_steps=num_inference_step,
-             guidance_scale=guidance_scale,
-             controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-             generator=generator,
-         ).images
-
-         return output
-
-     def app():
-         with gr.Blocks():
-             with gr.Row():
-                 with gr.Column():
-                     controlnet_inpaint_image_path = gr.Image(
-                         source="upload",
-                         tool="sketch",
-                         elem_id="image_upload",
-                         type="pil",
-                         label="Upload",
-                     ).style(height=260)
-
-                     controlnet_inpaint_prompt = gr.Textbox(
-                         lines=1, placeholder="Prompt", show_label=False
-                     )
-                     controlnet_inpaint_negative_prompt = gr.Textbox(
-                         lines=1, placeholder="Negative Prompt", show_label=False
-                     )
-
-                     with gr.Row():
-                         with gr.Column():
-                             controlnet_inpaint_stable_model_path = gr.Dropdown(
-                                 choices=stable_model_list,
-                                 value=stable_model_list[0],
-                                 label="Stable Model Path",
-                             )
-                             controlnet_inpaint_preprocces_type = gr.Dropdown(
-                                 choices=list(PREPROCCES_DICT.keys()),
-                                 value=list(PREPROCCES_DICT.keys())[0],
-                                 label="Preprocess Type",
-                             )
-                             controlnet_inpaint_conditioning_scale = gr.Slider(
-                                 minimum=0.0,
-                                 maximum=1.0,
-                                 step=0.1,
-                                 value=1.0,
-                                 label="ControlNet Conditioning Scale",
-                             )
-                             controlnet_inpaint_guidance_scale = gr.Slider(
-                                 minimum=0.1,
-                                 maximum=15,
-                                 step=0.1,
-                                 value=7.5,
-                                 label="Guidance Scale",
-                             )
-                             controlnet_inpaint_height = gr.Slider(
-                                 minimum=128,
-                                 maximum=1280,
-                                 step=32,
-                                 value=512,
-                                 label="Height",
-                             )
-                             controlnet_inpaint_width = gr.Slider(
-                                 minimum=128,
-                                 maximum=1280,
-                                 step=32,
-                                 value=512,
-                                 label="Width",
-                             )
-                             controlnet_inpaint_guess_mode = gr.Checkbox(
-                                 label="Guess Mode"
-                             )
-
-                         with gr.Column():
-                             controlnet_inpaint_model_path = gr.Dropdown(
-                                 choices=controlnet_model_list,
-                                 value=controlnet_model_list[0],
-                                 label="ControlNet Model Path",
-                             )
-                             controlnet_inpaint_scheduler = gr.Dropdown(
-                                 choices=list(SCHEDULER_MAPPING.keys()),
-                                 value=list(SCHEDULER_MAPPING.keys())[0],
-                                 label="Scheduler",
-                             )
-                             controlnet_inpaint_strength = gr.Slider(
-                                 minimum=0.1,
-                                 maximum=15,
-                                 step=0.1,
-                                 value=7.5,
-                                 label="Strength",
-                             )
-                             controlnet_inpaint_num_inference_step = gr.Slider(
-                                 minimum=1,
-                                 maximum=150,
-                                 step=1,
-                                 value=30,
-                                 label="Num Inference Step",
-                             )
-                             controlnet_inpaint_num_images_per_prompt = (
-                                 gr.Slider(
-                                     minimum=1,
-                                     maximum=4,
-                                     step=1,
-                                     value=1,
-                                     label="Number Of Images",
-                                 )
-                             )
-                             controlnet_inpaint_seed_generator = gr.Slider(
-                                 minimum=0,
-                                 maximum=1000000,
-                                 step=1,
-                                 value=0,
-                                 label="Seed(0 for random)",
-                             )
-
-                     # Button to generate the image
-                     controlnet_inpaint_predict_button = gr.Button(
-                         value="Generate Image"
-                     )
-
-                 with gr.Column():
-                     # Gallery to display the generated images
-                     controlnet_inpaint_output_image = gr.Gallery(
-                         label="Generated images",
-                         show_label=False,
-                         elem_id="gallery",
-                     ).style(grid=(1, 2))
-
-             controlnet_inpaint_predict_button.click(
-                 fn=StableDiffusionControlNetInpaintGenerator().generate_image,
-                 inputs=[
-                     controlnet_inpaint_image_path,
-                     controlnet_inpaint_stable_model_path,
-                     controlnet_inpaint_model_path,
-                     controlnet_inpaint_prompt,
-                     controlnet_inpaint_negative_prompt,
-                     controlnet_inpaint_num_images_per_prompt,
-                     controlnet_inpaint_height,
-                     controlnet_inpaint_width,
-                     controlnet_inpaint_strength,
-                     controlnet_inpaint_guess_mode,
-                     controlnet_inpaint_guidance_scale,
-                     controlnet_inpaint_num_inference_step,
-                     controlnet_inpaint_conditioning_scale,
-                     controlnet_inpaint_scheduler,
-                     controlnet_inpaint_seed_generator,
-                     controlnet_inpaint_preprocces_type,
-                 ],
-                 outputs=[controlnet_inpaint_output_image],
-             )
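To make the calling convention concrete: generate_image expects the Gradio sketch-tool payload, a dict holding PIL images under "image" and "mask". A hedged sketch of a direct call (hypothetical file names and parameter values, assuming a CUDA machine with xformers installed):

from PIL import Image

# Hypothetical invocation mirroring the Gradio sketch-tool output.
payload = {
    "image": Image.open("photo.png"),  # source photo
    "mask": Image.open("mask.png"),    # white pixels mark the inpaint area
}
images = StableDiffusionControlNetInpaintGenerator().generate_image(
    image_path=payload,
    stable_model_path="runwayml/stable-diffusion-v1-5",
    controlnet_model_path="lllyasviel/control_v11p_sd15_canny",
    prompt="a wooden table",
    negative_prompt="blurry",
    num_images_per_prompt=1,
    height=512,
    width=512,
    strength=0.8,
    guess_mode=False,
    guidance_scale=7.5,
    num_inference_step=30,
    controlnet_conditioning_scale=1.0,
    scheduler="DDIM",
    seed_generator=42,
    preprocces_type="Canny",
)
images[0].save("result.png")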
diffusion_webui/diffusion_models/controlnet_pipeline.py DELETED
@@ -1,262 +0,0 @@
- import gradio as gr
- import torch
- import cv2
- from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
- from PIL import Image
-
- from diffusion_webui.diffusion_models.base_controlnet_pipeline import (
-     ControlnetPipeline,
- )
- from diffusion_webui.utils.model_list import (
-     controlnet_model_list,
-     stable_model_list,
- )
- from diffusion_webui.utils.preprocces_utils import PREPROCCES_DICT
- from diffusion_webui.utils.scheduler_list import (
-     SCHEDULER_MAPPING,
-     get_scheduler,
- )
-
-
- stable_model_list = [
-     "runwayml/stable-diffusion-v1-5",
-     "dreamlike-art/dreamlike-diffusion-1.0",
-     "kadirnar/maturemalemix_v0",
-     "kadirnar/DreamShaper_v6"
- ]
-
- stable_inpiant_model_list = [
-     "stabilityai/stable-diffusion-2-inpainting",
-     "runwayml/stable-diffusion-inpainting",
-     "saik0s/realistic_vision_inpainting",
- ]
-
- controlnet_model_list = [
-     "lllyasviel/control_v11p_sd15_canny",
-     "lllyasviel/control_v11f1p_sd15_depth",
-     "lllyasviel/control_v11p_sd15_openpose",
-     "lllyasviel/control_v11p_sd15_scribble",
-     "lllyasviel/control_v11p_sd15_mlsd",
-     "lllyasviel/control_v11e_sd15_shuffle",
-     "lllyasviel/control_v11e_sd15_ip2p",
-     "lllyasviel/control_v11p_sd15_lineart",
-     "lllyasviel/control_v11p_sd15s2_lineart_anime",
-     "lllyasviel/control_v11p_sd15_softedge",
- ]
-
- class StableDiffusionControlNetGenerator(ControlnetPipeline):
-     def __init__(self):
-         self.pipe = None
-
-     def load_model(self, stable_model_path, controlnet_model_path, scheduler):
-         if self.pipe is None or self.pipe.model_name != stable_model_path or self.pipe.scheduler_name != scheduler:
-             controlnet = ControlNetModel.from_pretrained(
-                 controlnet_model_path, torch_dtype=torch.float16
-             )
-             self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
-                 pretrained_model_name_or_path=stable_model_path,
-                 controlnet=controlnet,
-                 safety_checker=None,
-                 torch_dtype=torch.float16,
-             )
-             self.pipe.model_name = stable_model_path
-             self.pipe.scheduler_name = scheduler
-
-         self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
-         self.pipe.scheduler_name = scheduler
-         self.pipe.to("cuda")
-         self.pipe.enable_xformers_memory_efficient_attention()
-
-         return self.pipe
-
-
-     def controlnet_preprocces(
-         self,
-         read_image: str,
-         preprocces_type: str,
-     ):
-         processed_image = PREPROCCES_DICT[preprocces_type](read_image)
-         return processed_image
-
-     def generate_image(
-         self,
-         image_path: str,
-         stable_model_path: str,
-         controlnet_model_path: str,
-         height: int,
-         width: int,
-         guess_mode: bool,
-         controlnet_conditioning_scale: int,
-         prompt: str,
-         negative_prompt: str,
-         num_images_per_prompt: int,
-         guidance_scale: int,
-         num_inference_step: int,
-         scheduler: str,
-         seed_generator: int,
-         preprocces_type: str,
-     ):
-         pipe = self.load_model(
-             stable_model_path=stable_model_path,
-             controlnet_model_path=controlnet_model_path,
-             scheduler=scheduler,
-         )
-         if preprocces_type == "ScribbleXDOG":
-             read_image = cv2.imread(image_path)
-             controlnet_image = self.controlnet_preprocces(read_image=read_image, preprocces_type=preprocces_type)[0]
-             controlnet_image = Image.fromarray(controlnet_image)
-
-         elif preprocces_type == "None":
-             controlnet_image = self.controlnet_preprocces(read_image=image_path, preprocces_type=preprocces_type)
-         else:
-             read_image = Image.open(image_path)
-             controlnet_image = self.controlnet_preprocces(read_image=read_image, preprocces_type=preprocces_type)
-
-         if seed_generator == 0:
-             random_seed = torch.randint(0, 1000000, (1,))
-             generator = torch.manual_seed(random_seed)
-         else:
-             generator = torch.manual_seed(seed_generator)
-
-
-         output = pipe(
-             prompt=prompt,
-             height=height,
-             width=width,
-             controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-             guess_mode=guess_mode,
-             image=controlnet_image,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             num_inference_steps=num_inference_step,
-             guidance_scale=guidance_scale,
-             generator=generator,
-         ).images
-
-         return output
-
-     def app():
-         with gr.Blocks():
-             with gr.Row():
-                 with gr.Column():
-                     controlnet_image_path = gr.Image(
-                         type="filepath", label="Image"
-                     ).style(height=260)
-                     controlnet_prompt = gr.Textbox(
-                         lines=1, placeholder="Prompt", show_label=False
-                     )
-                     controlnet_negative_prompt = gr.Textbox(
-                         lines=1, placeholder="Negative Prompt", show_label=False
-                     )
-
-                     with gr.Row():
-                         with gr.Column():
-                             controlnet_stable_model_path = gr.Dropdown(
-                                 choices=stable_model_list,
-                                 value=stable_model_list[0],
-                                 label="Stable Model Path",
-                             )
-                             controlnet_preprocces_type = gr.Dropdown(
-                                 choices=list(PREPROCCES_DICT.keys()),
-                                 value=list(PREPROCCES_DICT.keys())[0],
-                                 label="Preprocess Type",
-                             )
-                             controlnet_conditioning_scale = gr.Slider(
-                                 minimum=0.0,
-                                 maximum=1.0,
-                                 step=0.1,
-                                 value=1.0,
-                                 label="ControlNet Conditioning Scale",
-                             )
-                             controlnet_guidance_scale = gr.Slider(
-                                 minimum=0.1,
-                                 maximum=15,
-                                 step=0.1,
-                                 value=7.5,
-                                 label="Guidance Scale",
-                             )
-                             controlnet_height = gr.Slider(
-                                 minimum=128,
-                                 maximum=1280,
-                                 step=32,
-                                 value=512,
-                                 label="Height",
-                             )
-                             controlnet_width = gr.Slider(
-                                 minimum=128,
-                                 maximum=1280,
-                                 step=32,
-                                 value=512,
-                                 label="Width",
-                             )
-
-                         with gr.Row():
-                             with gr.Column():
-                                 controlnet_model_path = gr.Dropdown(
-                                     choices=controlnet_model_list,
-                                     value=controlnet_model_list[0],
-                                     label="ControlNet Model Path",
-                                 )
-                                 controlnet_scheduler = gr.Dropdown(
-                                     choices=list(SCHEDULER_MAPPING.keys()),
-                                     value=list(SCHEDULER_MAPPING.keys())[0],
-                                     label="Scheduler",
-                                 )
-                                 controlnet_num_inference_step = gr.Slider(
-                                     minimum=1,
-                                     maximum=150,
-                                     step=1,
-                                     value=30,
-                                     label="Num Inference Step",
-                                 )
-
-                                 controlnet_num_images_per_prompt = gr.Slider(
-                                     minimum=1,
-                                     maximum=4,
-                                     step=1,
-                                     value=1,
-                                     label="Number Of Images",
-                                 )
-                                 controlnet_seed_generator = gr.Slider(
-                                     minimum=0,
-                                     maximum=1000000,
-                                     step=1,
-                                     value=0,
-                                     label="Seed(0 for random)",
-                                 )
-                                 controlnet_guess_mode = gr.Checkbox(
-                                     label="Guess Mode"
-                                 )
-
-                     # Button to generate the image
-                     predict_button = gr.Button(value="Generate Image")
-
-                 with gr.Column():
-                     # Gallery to display the generated images
-                     output_image = gr.Gallery(
-                         label="Generated images",
-                         show_label=False,
-                         elem_id="gallery",
-                     ).style(grid=(1, 2))
-
-             predict_button.click(
-                 fn=StableDiffusionControlNetGenerator().generate_image,
-                 inputs=[
-                     controlnet_image_path,
-                     controlnet_stable_model_path,
-                     controlnet_model_path,
-                     controlnet_height,
-                     controlnet_width,
-                     controlnet_guess_mode,
-                     controlnet_conditioning_scale,
-                     controlnet_prompt,
-                     controlnet_negative_prompt,
-                     controlnet_num_images_per_prompt,
-                     controlnet_guidance_scale,
-                     controlnet_num_inference_step,
-                     controlnet_scheduler,
-                     controlnet_seed_generator,
-                     controlnet_preprocces_type,
-                 ],
-                 outputs=[output_image],
-             )
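Unlike the inpaint variant, this generate_image takes a plain file path. A hedged usage sketch (hypothetical file name and parameter values, assuming a CUDA machine with xformers installed):

# Hypothetical invocation; assumes a local "input.png" exists.
images = StableDiffusionControlNetGenerator().generate_image(
    image_path="input.png",
    stable_model_path="runwayml/stable-diffusion-v1-5",
    controlnet_model_path="lllyasviel/control_v11p_sd15_canny",
    height=512,
    width=512,
    guess_mode=False,
    controlnet_conditioning_scale=1.0,
    prompt="a cyberpunk street at night",
    negative_prompt="low quality",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=30,
    scheduler="DDIM",
    seed_generator=0,  # 0 draws a random seed
    preprocces_type="Canny",
)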
diffusion_webui/diffusion_models/img2img_app.py DELETED
@@ -1,155 +0,0 @@
- import gradio as gr
- import torch
- from diffusers import StableDiffusionImg2ImgPipeline
- from PIL import Image
-
- from diffusion_webui.utils.model_list import stable_model_list
- from diffusion_webui.utils.scheduler_list import (
-     SCHEDULER_MAPPING,
-     get_scheduler,
- )
-
-
- class StableDiffusionImage2ImageGenerator:
-     def __init__(self):
-         self.pipe = None
-
-     def load_model(self, stable_model_path, scheduler):
-         if self.pipe is None or self.pipe.model_name != stable_model_path or self.pipe.scheduler_name != scheduler:
-             self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-                 stable_model_path, safety_checker=None, torch_dtype=torch.float16
-             )
-
-         self.pipe.model_name = stable_model_path
-         self.pipe.scheduler_name = scheduler
-         self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
-         self.pipe.to("cuda")
-         self.pipe.enable_xformers_memory_efficient_attention()
-
-         return self.pipe
-
-     def generate_image(
-         self,
-         image_path: str,
-         stable_model_path: str,
-         prompt: str,
-         negative_prompt: str,
-         num_images_per_prompt: int,
-         scheduler: str,
-         guidance_scale: int,
-         num_inference_step: int,
-         seed_generator=0,
-     ):
-         pipe = self.load_model(
-             stable_model_path=stable_model_path,
-             scheduler=scheduler,
-         )
-
-         if seed_generator == 0:
-             random_seed = torch.randint(0, 1000000, (1,))
-             generator = torch.manual_seed(random_seed)
-         else:
-             generator = torch.manual_seed(seed_generator)
-
-         image = Image.open(image_path)
-         images = pipe(
-             prompt,
-             image=image,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             num_inference_steps=num_inference_step,
-             guidance_scale=guidance_scale,
-             generator=generator,
-         ).images
-
-         return images
-
-     def app():
-         with gr.Blocks():
-             with gr.Row():
-                 with gr.Column():
-                     image2image_image_file = gr.Image(
-                         type="filepath", label="Image"
-                     ).style(height=260)
-
-                     image2image_prompt = gr.Textbox(
-                         lines=1,
-                         placeholder="Prompt",
-                         show_label=False,
-                     )
-
-                     image2image_negative_prompt = gr.Textbox(
-                         lines=1,
-                         placeholder="Negative Prompt",
-                         show_label=False,
-                     )
-
-                     with gr.Row():
-                         with gr.Column():
-                             image2image_model_path = gr.Dropdown(
-                                 choices=stable_model_list,
-                                 value=stable_model_list[0],
-                                 label="Stable Model Id",
-                             )
-
-                             image2image_guidance_scale = gr.Slider(
-                                 minimum=0.1,
-                                 maximum=15,
-                                 step=0.1,
-                                 value=7.5,
-                                 label="Guidance Scale",
-                             )
-                             image2image_num_inference_step = gr.Slider(
-                                 minimum=1,
-                                 maximum=100,
-                                 step=1,
-                                 value=50,
-                                 label="Num Inference Step",
-                             )
-                         with gr.Row():
-                             with gr.Column():
-                                 image2image_scheduler = gr.Dropdown(
-                                     choices=list(SCHEDULER_MAPPING.keys()),
-                                     value=list(SCHEDULER_MAPPING.keys())[0],
-                                     label="Scheduler",
-                                 )
-                                 image2image_num_images_per_prompt = gr.Slider(
-                                     minimum=1,
-                                     maximum=4,
-                                     step=1,
-                                     value=1,
-                                     label="Number Of Images",
-                                 )
-
-                                 image2image_seed_generator = gr.Slider(
-                                     minimum=0,
-                                     maximum=1000000,
-                                     step=1,
-                                     value=0,
-                                     label="Seed(0 for random)",
-                                 )
-
-                     image2image_predict_button = gr.Button(value="Generator")
-
-                 with gr.Column():
-                     output_image = gr.Gallery(
-                         label="Generated images",
-                         show_label=False,
-                         elem_id="gallery",
-                     ).style(grid=(1, 2))
-
-             image2image_predict_button.click(
-                 fn=StableDiffusionImage2ImageGenerator().generate_image,
-                 inputs=[
-                     image2image_image_file,
-                     image2image_model_path,
-                     image2image_prompt,
-                     image2image_negative_prompt,
-                     image2image_num_images_per_prompt,
-                     image2image_scheduler,
-                     image2image_guidance_scale,
-                     image2image_num_inference_step,
-                     image2image_seed_generator,
-                 ],
-                 outputs=[output_image],
-             )
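A hedged usage sketch of the img2img path (hypothetical file name and values, assuming a CUDA machine with xformers installed):

# Hypothetical invocation; assumes a local "sketch.png" exists.
images = StableDiffusionImage2ImageGenerator().generate_image(
    image_path="sketch.png",
    stable_model_path="runwayml/stable-diffusion-v1-5",
    prompt="an oil painting of a harbor",
    negative_prompt="blurry",
    num_images_per_prompt=1,
    scheduler="DDIM",
    guidance_scale=7.5,
    num_inference_step=50,
    seed_generator=0,  # 0 draws a random seed
)
images[0].save("img2img_result.png")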
diffusion_webui/diffusion_models/inpaint_app.py DELETED
@@ -1,149 +0,0 @@
- import gradio as gr
- import torch
- from diffusers import DiffusionPipeline
-
- from diffusion_webui.utils.model_list import stable_inpiant_model_list
-
-
- class StableDiffusionInpaintGenerator:
-     def __init__(self):
-         self.pipe = None
-
-     def load_model(self, stable_model_path):
-         if self.pipe is None or self.pipe.model_name != stable_model_path:
-             self.pipe = DiffusionPipeline.from_pretrained(
-                 stable_model_path, revision="fp16", torch_dtype=torch.float16
-             )
-             self.pipe.to("cuda")
-             self.pipe.enable_xformers_memory_efficient_attention()
-             self.pipe.model_name = stable_model_path
-
-         return self.pipe
-
-     def generate_image(
-         self,
-         pil_image: str,
-         stable_model_path: str,
-         prompt: str,
-         negative_prompt: str,
-         num_images_per_prompt: int,
-         guidance_scale: int,
-         num_inference_step: int,
-         seed_generator=0,
-     ):
-         image = pil_image["image"].convert("RGB").resize((512, 512))
-         mask_image = pil_image["mask"].convert("RGB").resize((512, 512))
-         pipe = self.load_model(stable_model_path)
-
-         if seed_generator == 0:
-             random_seed = torch.randint(0, 1000000, (1,))
-             generator = torch.manual_seed(random_seed)
-         else:
-             generator = torch.manual_seed(seed_generator)
-
-         output = pipe(
-             prompt=prompt,
-             image=image,
-             mask_image=mask_image,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             num_inference_steps=num_inference_step,
-             guidance_scale=guidance_scale,
-             generator=generator,
-         ).images
-
-         return output
-
-     def app():
-         with gr.Blocks():
-             with gr.Row():
-                 with gr.Column():
-                     stable_diffusion_inpaint_image_file = gr.Image(
-                         source="upload",
-                         tool="sketch",
-                         elem_id="image_upload",
-                         type="pil",
-                         label="Upload",
-                     ).style(height=260)
-
-                     stable_diffusion_inpaint_prompt = gr.Textbox(
-                         lines=1,
-                         placeholder="Prompt",
-                         show_label=False,
-                     )
-
-                     stable_diffusion_inpaint_negative_prompt = gr.Textbox(
-                         lines=1,
-                         placeholder="Negative Prompt",
-                         show_label=False,
-                     )
-                     stable_diffusion_inpaint_model_id = gr.Dropdown(
-                         choices=stable_inpiant_model_list,
-                         value=stable_inpiant_model_list[0],
-                         label="Inpaint Model Id",
-                     )
-                     with gr.Row():
-                         with gr.Column():
-                             stable_diffusion_inpaint_guidance_scale = gr.Slider(
-                                 minimum=0.1,
-                                 maximum=15,
-                                 step=0.1,
-                                 value=7.5,
-                                 label="Guidance Scale",
-                             )
-
-                             stable_diffusion_inpaint_num_inference_step = (
-                                 gr.Slider(
-                                     minimum=1,
-                                     maximum=100,
-                                     step=1,
-                                     value=50,
-                                     label="Num Inference Step",
-                                 )
-                             )
-
-                         with gr.Row():
-                             with gr.Column():
-                                 stable_diffusion_inpiant_num_images_per_prompt = gr.Slider(
-                                     minimum=1,
-                                     maximum=4,
-                                     step=1,
-                                     value=1,
-                                     label="Number Of Images",
-                                 )
-                                 stable_diffusion_inpaint_seed_generator = (
-                                     gr.Slider(
-                                         minimum=0,
-                                         maximum=1000000,
-                                         step=1,
-                                         value=0,
-                                         label="Seed(0 for random)",
-                                     )
-                                 )
-
-                     stable_diffusion_inpaint_predict = gr.Button(
-                         value="Generator"
-                     )
-
-                 with gr.Column():
-                     output_image = gr.Gallery(
-                         label="Generated images",
-                         show_label=False,
-                         elem_id="gallery",
-                     ).style(grid=(1, 2))
-
-             stable_diffusion_inpaint_predict.click(
-                 fn=StableDiffusionInpaintGenerator().generate_image,
-                 inputs=[
-                     stable_diffusion_inpaint_image_file,
-                     stable_diffusion_inpaint_model_id,
-                     stable_diffusion_inpaint_prompt,
-                     stable_diffusion_inpaint_negative_prompt,
-                     stable_diffusion_inpiant_num_images_per_prompt,
-                     stable_diffusion_inpaint_guidance_scale,
-                     stable_diffusion_inpaint_num_inference_step,
-                     stable_diffusion_inpaint_seed_generator,
-                 ],
-                 outputs=[output_image],
-             )
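Like the ControlNet inpaint class, this generator takes the Gradio sketch-tool dict. A hedged sketch of a direct call (hypothetical file names, assuming a CUDA machine with xformers installed):

from PIL import Image

# Hypothetical invocation; the dict mirrors Gradio's sketch-tool output.
payload = {
    "image": Image.open("photo.png"),
    "mask": Image.open("mask.png"),
}
images = StableDiffusionInpaintGenerator().generate_image(
    pil_image=payload,
    stable_model_path="stabilityai/stable-diffusion-2-inpainting",
    prompt="a red sofa",
    negative_prompt="distorted",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=50,
    seed_generator=0,  # 0 draws a random seed
)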
diffusion_webui/diffusion_models/text2img_app.py DELETED
@@ -1,173 +0,0 @@
- import gradio as gr
- import torch
- from diffusers import StableDiffusionPipeline, DiffusionPipeline
-
- from diffusion_webui.utils.model_list import stable_model_list
- from diffusion_webui.utils.scheduler_list import (
-     SCHEDULER_MAPPING,
-     get_scheduler,
- )
-
-
- class StableDiffusionText2ImageGenerator:
-     def __init__(self):
-         self.pipe = None
-
-     def load_model(
-         self,
-         stable_model_path,
-         scheduler,
-     ):
-         if self.pipe is None or self.pipe.model_name != stable_model_path or self.pipe.scheduler_name != scheduler:
-             if stable_model_path == "stabilityai/stable-diffusion-xl-base-0.9":
-                 self.pipe = DiffusionPipeline.from_pretrained(
-                     stable_model_path, safety_checker=None, torch_dtype=torch.float16
-                 )
-             else:
-                 self.pipe = StableDiffusionPipeline.from_pretrained(
-                     stable_model_path, safety_checker=None, torch_dtype=torch.float16
-                 )
-
-         self.pipe = get_scheduler(pipe=self.pipe, scheduler=scheduler)
-         self.pipe.to("cuda")
-         self.pipe.enable_xformers_memory_efficient_attention()
-         self.pipe.model_name = stable_model_path
-         self.pipe.scheduler_name = scheduler
-
-         return self.pipe
-
-     def generate_image(
-         self,
-         stable_model_path: str,
-         prompt: str,
-         negative_prompt: str,
-         num_images_per_prompt: int,
-         scheduler: str,
-         guidance_scale: int,
-         num_inference_step: int,
-         height: int,
-         width: int,
-         seed_generator=0,
-     ):
-         pipe = self.load_model(
-             stable_model_path=stable_model_path,
-             scheduler=scheduler,
-         )
-         if seed_generator == 0:
-             random_seed = torch.randint(0, 1000000, (1,))
-             generator = torch.manual_seed(random_seed)
-         else:
-             generator = torch.manual_seed(seed_generator)
-
-         images = pipe(
-             prompt=prompt,
-             height=height,
-             width=width,
-             negative_prompt=negative_prompt,
-             num_images_per_prompt=num_images_per_prompt,
-             num_inference_steps=num_inference_step,
-             guidance_scale=guidance_scale,
-             generator=generator,
-         ).images
-
-         return images
-
-     def app():
-         with gr.Blocks():
-             with gr.Row():
-                 with gr.Column():
-                     text2image_prompt = gr.Textbox(
-                         lines=1,
-                         placeholder="Prompt",
-                         show_label=False,
-                     )
-
-                     text2image_negative_prompt = gr.Textbox(
-                         lines=1,
-                         placeholder="Negative Prompt",
-                         show_label=False,
-                     )
-                     with gr.Row():
-                         with gr.Column():
-                             text2image_model_path = gr.Dropdown(
-                                 choices=stable_model_list,
-                                 value=stable_model_list[0],
-                                 label="Text-Image Model Id",
-                             )
-
-                             text2image_guidance_scale = gr.Slider(
-                                 minimum=0.1,
-                                 maximum=15,
-                                 step=0.1,
-                                 value=7.5,
-                                 label="Guidance Scale",
-                             )
-
-                             text2image_num_inference_step = gr.Slider(
-                                 minimum=1,
-                                 maximum=100,
-                                 step=1,
-                                 value=50,
-                                 label="Num Inference Step",
-                             )
-                             text2image_num_images_per_prompt = gr.Slider(
-                                 minimum=1,
-                                 maximum=4,
-                                 step=1,
-                                 value=1,
-                                 label="Number Of Images",
-                             )
-                         with gr.Row():
-                             with gr.Column():
-                                 text2image_scheduler = gr.Dropdown(
-                                     choices=list(SCHEDULER_MAPPING.keys()),
-                                     value=list(SCHEDULER_MAPPING.keys())[0],
-                                     label="Scheduler",
-                                 )
-
-                                 text2image_height = gr.Slider(
-                                     minimum=128,
-                                     maximum=1280,
-                                     step=32,
-                                     value=512,
-                                     label="Image Height",
-                                 )
-
-                                 text2image_width = gr.Slider(
-                                     minimum=128,
-                                     maximum=1280,
-                                     step=32,
-                                     value=512,
-                                     label="Image Width",
-                                 )
-                                 text2image_seed_generator = gr.Slider(
-                                     label="Seed(0 for random)",
-                                     minimum=0,
-                                     maximum=1000000,
-                                     value=0,
-                                 )
-                     text2image_predict = gr.Button(value="Generator")
-
-                 with gr.Column():
-                     output_image = gr.Gallery(
-                         label="Generated images",
-                         show_label=False,
-                         elem_id="gallery",
-                     ).style(grid=(1, 2), height=200)
-
-             text2image_predict.click(
-                 fn=StableDiffusionText2ImageGenerator().generate_image,
-                 inputs=[
-                     text2image_model_path,
-                     text2image_prompt,
-                     text2image_negative_prompt,
-                     text2image_num_images_per_prompt,
-                     text2image_scheduler,
-                     text2image_guidance_scale,
-                     text2image_num_inference_step,
-                     text2image_height,
-                     text2image_width,
-                     text2image_seed_generator,
-                 ],
-                 outputs=output_image,
-             )
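A hedged usage sketch of the text-to-image path (hypothetical values, assuming a CUDA machine with xformers installed):

# Hypothetical invocation of the text2img generator.
images = StableDiffusionText2ImageGenerator().generate_image(
    stable_model_path="runwayml/stable-diffusion-v1-5",
    prompt="a watercolor fox in a forest",
    negative_prompt="low quality",
    num_images_per_prompt=2,
    scheduler="DDIM",
    guidance_scale=7.5,
    num_inference_step=50,
    height=512,
    width=512,
    seed_generator=1234,
)
for i, image in enumerate(images):
    image.save(f"text2img_{i}.png")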
diffusion_webui/utils/__init__.py DELETED
File without changes
diffusion_webui/utils/data_utils.py DELETED
@@ -1,12 +0,0 @@
- from PIL import Image
-
-
- def image_grid(imgs, rows, cols):
-     assert len(imgs) == rows * cols
-
-     w, h = imgs[0].size
-     grid = Image.new("RGB", size=(cols * w, rows * h))
-
-     for i, img in enumerate(imgs):
-         grid.paste(img, box=(i % cols * w, i // cols * h))
-     return grid
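The helper tiles rows * cols equally sized images into one canvas, left to right then top to bottom. A quick usage sketch (hypothetical file names):

from PIL import Image

# Hypothetical usage: tile four same-size images into a 2x2 grid.
imgs = [Image.open(f"out_{i}.png") for i in range(4)]
grid = image_grid(imgs, rows=2, cols=2)
grid.save("grid.png")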
diffusion_webui/utils/model_list.py DELETED
@@ -1,25 +0,0 @@
- stable_model_list = [
-     "runwayml/stable-diffusion-v1-5",
-     "SG161222/Realistic_Vision_V2.0",
-     "stablediffusionapi/cyberrealistic",
-     "SG161222/Realistic_Vision_V5.1_noVAE",
- ]
-
- stable_inpiant_model_list = [
-     "kadirnar/Realistic51-Inpaint",
-     "stabilityai/stable-diffusion-2-inpainting",
-     "runwayml/stable-diffusion-inpainting",
- ]
-
- controlnet_model_list = [
-     "lllyasviel/control_v11p_sd15_canny",
-     "lllyasviel/control_v11f1p_sd15_depth",
-     "lllyasviel/control_v11p_sd15_openpose",
-     "lllyasviel/control_v11p_sd15_scribble",
-     "lllyasviel/control_v11p_sd15_mlsd",
-     "lllyasviel/control_v11e_sd15_shuffle",
-     "lllyasviel/control_v11e_sd15_ip2p",
-     "lllyasviel/control_v11p_sd15_lineart",
-     "lllyasviel/control_v11p_sd15s2_lineart_anime",
-     "lllyasviel/control_v11p_sd15_softedge",
- ]
diffusion_webui/utils/preprocces_utils.py DELETED
@@ -1,94 +0,0 @@
- from controlnet_aux import (
-     CannyDetector,
-     ContentShuffleDetector,
-     HEDdetector,
-     LineartAnimeDetector,
-     LineartDetector,
-     MediapipeFaceDetector,
-     MidasDetector,
-     MLSDdetector,
-     NormalBaeDetector,
-     OpenposeDetector,
-     PidiNetDetector,
-     SamDetector,
- )
-
- import numpy as np
- import cv2
- from PIL import Image
-
-
- def pad64(x):
-     return int(np.ceil(float(x) / 64.0) * 64 - x)
-
-
- def HWC3(x):
-     assert x.dtype == np.uint8
-     if x.ndim == 2:
-         x = x[:, :, None]
-     assert x.ndim == 3
-     H, W, C = x.shape
-     assert C == 1 or C == 3 or C == 4
-     if C == 3:
-         return x
-     if C == 1:
-         return np.concatenate([x, x, x], axis=2)
-     if C == 4:
-         color = x[:, :, 0:3].astype(np.float32)
-         alpha = x[:, :, 3:4].astype(np.float32) / 255.0
-         y = color * alpha + 255.0 * (1.0 - alpha)
-         y = y.clip(0, 255).astype(np.uint8)
-         return y
-
-
- def safer_memory(x):
-     return np.ascontiguousarray(x.copy()).copy()
-
-
- def resize_image_with_pad(input_image, resolution, skip_hwc3=False):
-     if skip_hwc3:
-         img = input_image
-     else:
-         img = HWC3(input_image)
-
-     H_raw, W_raw, _ = img.shape
-     k = float(resolution) / float(min(H_raw, W_raw))
-     interpolation = cv2.INTER_CUBIC if k > 1 else cv2.INTER_AREA
-     H_target = int(np.round(float(H_raw) * k))
-     W_target = int(np.round(float(W_raw) * k))
-     img = cv2.resize(img, (W_target, H_target), interpolation=interpolation)
-     H_pad, W_pad = pad64(H_target), pad64(W_target)
-     img_padded = np.pad(img, [[0, H_pad], [0, W_pad], [0, 0]], mode='edge')
-
-     def remove_pad(x):
-         return safer_memory(x[:H_target, :W_target])
-
-     return safer_memory(img_padded), remove_pad
-
-
- def scribble_xdog(img, res=512, thr_a=32, **kwargs):
-     img, remove_pad = resize_image_with_pad(img, res)
-     g1 = cv2.GaussianBlur(img.astype(np.float32), (0, 0), 0.5)
-     g2 = cv2.GaussianBlur(img.astype(np.float32), (0, 0), 5.0)
-     dog = (255 - np.min(g2 - g1, axis=2)).clip(0, 255).astype(np.uint8)
-     result = np.zeros_like(img, dtype=np.uint8)
-     result[2 * (255 - dog) > thr_a] = 255
-     return remove_pad(result), True
-
-
- def none_preprocces(image_path: str):
-     return Image.open(image_path)
-
-
- PREPROCCES_DICT = {
-     "Hed": HEDdetector.from_pretrained("lllyasviel/Annotators"),
-     "Midas": MidasDetector.from_pretrained("lllyasviel/Annotators"),
-     "MLSD": MLSDdetector.from_pretrained("lllyasviel/Annotators"),
-     "Openpose": OpenposeDetector.from_pretrained("lllyasviel/Annotators"),
-     "PidiNet": PidiNetDetector.from_pretrained("lllyasviel/Annotators"),
-     "NormalBae": NormalBaeDetector.from_pretrained("lllyasviel/Annotators"),
-     "Lineart": LineartDetector.from_pretrained("lllyasviel/Annotators"),
-     "LineartAnime": LineartAnimeDetector.from_pretrained(
-         "lllyasviel/Annotators"
-     ),
-     "Canny": CannyDetector(),
-     "ContentShuffle": ContentShuffleDetector(),
-     "MediapipeFace": MediapipeFaceDetector(),
-     "ScribbleXDOG": scribble_xdog,
-     "None": none_preprocces,
- }
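For reference, PREPROCCES_DICT maps a UI label to a callable: the controlnet_aux detectors take an image and return a control image, while scribble_xdog works on numpy arrays and also returns a flag. A hedged sketch (hypothetical file name):

import cv2
from PIL import Image

# Annotator path: most entries take a PIL image and return a control image.
canny_map = PREPROCCES_DICT["Canny"](Image.open("input.png"))

# ScribbleXDOG path: expects a numpy array, returns (edge_map, flag).
edge_map, _ = PREPROCCES_DICT["ScribbleXDOG"](
    cv2.imread("input.png"), res=512, thr_a=32
)
Image.fromarray(edge_map).save("xdog.png")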
diffusion_webui/utils/scheduler_list.py DELETED
@@ -1,39 +0,0 @@
- from diffusers import (
-     DDIMScheduler,
-     DDPMScheduler,
-     DEISMultistepScheduler,
-     DPMSolverMultistepScheduler,
-     DPMSolverSinglestepScheduler,
-     EulerAncestralDiscreteScheduler,
-     EulerDiscreteScheduler,
-     HeunDiscreteScheduler,
-     KDPM2AncestralDiscreteScheduler,
-     KDPM2DiscreteScheduler,
-     PNDMScheduler,
-     UniPCMultistepScheduler,
- )
-
- SCHEDULER_MAPPING = {
-     "DDIM": DDIMScheduler,
-     "DDPMScheduler": DDPMScheduler,
-     "DEISMultistep": DEISMultistepScheduler,
-     "DPMSolverMultistep": DPMSolverMultistepScheduler,
-     "DPMSolverSinglestep": DPMSolverSinglestepScheduler,
-     "EulerAncestralDiscrete": EulerAncestralDiscreteScheduler,
-     "EulerDiscrete": EulerDiscreteScheduler,
-     "HeunDiscrete": HeunDiscreteScheduler,
-     "KDPM2AncestralDiscrete": KDPM2AncestralDiscreteScheduler,
-     "KDPM2Discrete": KDPM2DiscreteScheduler,
-     "PNDMScheduler": PNDMScheduler,
-     "UniPCMultistep": UniPCMultistepScheduler,
- }
-
-
- def get_scheduler(pipe, scheduler):
-     if scheduler in SCHEDULER_MAPPING:
-         SchedulerClass = SCHEDULER_MAPPING[scheduler]
-         pipe.scheduler = SchedulerClass.from_config(pipe.scheduler.config)
-     else:
-         raise ValueError(f"Invalid scheduler name {scheduler}")
-
-     return pipe
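To illustrate how get_scheduler swaps the scheduler on an already-built pipeline, a short hedged sketch (assuming the diffusers model is available locally or downloadable):

import torch
from diffusers import StableDiffusionPipeline

# Hypothetical usage: load a pipeline, then swap in a UniPC scheduler
# built from the current scheduler's config.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe = get_scheduler(pipe=pipe, scheduler="UniPCMultistep")
print(type(pipe.scheduler).__name__)  # UniPCMultistepScheduler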