Spaces: Running on Zero

Commit c4b1a2f ("Initial commit")
1 parent: 2a0761a

Files changed:
- .gitignore +10 -0
- README.md +1 -1
- app.py +413 -0
- mixture_tiling_sdxl.py +1198 -0
- requirements.txt +10 -0
.gitignore ADDED
@@ -0,0 +1,10 @@
__pycache__/
*.py[cod]
/.vs
.vscode/
.idea/
venv/
.venv/
*.log
.DS_Store
.gradio
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: Mixture Of Diffusers
+title: Mixture Of Diffusers SDXL Tiling
 emoji: 🐢
 colorFrom: purple
 colorTo: red
app.py ADDED
@@ -0,0 +1,413 @@
import random
import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import AutoencoderKL
from mixture_tiling_sdxl import StableDiffusionXLTilingPipeline

MAX_SEED = np.iinfo(np.int32).max
SCHEDULERS = [
    "LMSDiscreteScheduler",
    "DEISMultistepScheduler",
    "HeunDiscreteScheduler",
    "EulerAncestralDiscreteScheduler",
    "EulerDiscreteScheduler",
    "DPMSolverMultistepScheduler",
    "DPMSolverMultistepScheduler-Karras",
    "DPMSolverMultistepScheduler-Karras-SDE",
    "UniPCMultistepScheduler"
]

vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
).to("cuda")

model_id = "stablediffusionapi/yamermix-v8-vae"
pipe = StableDiffusionXLTilingPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    vae=vae,
    use_safetensors=False,  # required for yamermix
    # variant="fp16",
).to("cuda")

# pipe.enable_model_cpu_offload()  # <- enable this if you have limited VRAM
pipe.enable_vae_tiling()
pipe.enable_vae_slicing()

# region functions
def select_scheduler(scheduler_name):
    scheduler = scheduler_name.split("-")
    scheduler_class_name = scheduler[0]
    add_kwargs = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear", "num_train_timesteps": 1000}
    if len(scheduler) > 1:
        add_kwargs["use_karras_sigmas"] = True
    if len(scheduler) > 2:
        add_kwargs["algorithm_type"] = "sde-dpmsolver++"
    import diffusers
    scheduler = getattr(diffusers, scheduler_class_name)
    scheduler = scheduler.from_config(pipe.scheduler.config, **add_kwargs)
    return scheduler
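# For example, "DPMSolverMultistepScheduler-Karras-SDE" resolves to the
# DPMSolverMultistepScheduler class with use_karras_sigmas=True (first
# suffix) and algorithm_type="sde-dpmsolver++" (second suffix), while a
# bare name such as "UniPCMultistepScheduler" only gets the base beta kwargs.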
@spaces.GPU(duration=16)
def predict(left_prompt, center_prompt, right_prompt, negative_prompt, left_gs, center_gs, right_gs, overlap_pixels, steps, generation_seed, scheduler, tile_height, tile_width, target_height, target_width):
    global pipe

    # Set selected scheduler
    print(f"Using scheduler: {scheduler}...")
    pipe.scheduler = select_scheduler(scheduler)

    # Set seed
    generator = torch.Generator("cuda").manual_seed(generation_seed)

    target_height = int(target_height)
    target_width = int(target_width)
    tile_height = int(tile_height)
    tile_width = int(tile_width)

    # Mixture of Diffusers generation
    image = pipe(
        prompt=[
            [
                left_prompt,
                center_prompt,
                right_prompt,
            ]
        ],
        negative_prompt=negative_prompt,
        tile_height=tile_height,
        tile_width=tile_width,
        tile_row_overlap=0,
        tile_col_overlap=overlap_pixels,
        guidance_scale_tiles=[[left_gs, center_gs, right_gs]],
        height=target_height,
        width=target_width,
        target_size=(target_height, target_width),
        generator=generator,
        num_inference_steps=steps,
    )["images"][0]

    return image
def calc_tile_size(target_height, target_width, overlap_pixels, max_tile_width_size=1280):
    num_cols = 3
    num_rows = 1
    min_tile_dimension = 8
    reduction_step = 8
    max_tile_height_size = 1024
    best_tile_width = 0
    best_tile_height = 0
    best_adjusted_target_width = 0
    best_adjusted_target_height = 0
    found_valid_solution = False

    # Adjust Tile Width
    tile_width = max_tile_width_size
    tile_height = max_tile_height_size

    while tile_width >= min_tile_dimension:
        horizontal_borders = num_cols - 1
        total_horizontal_overlap_pixels = overlap_pixels * horizontal_borders
        adjusted_target_width = tile_width * num_cols - total_horizontal_overlap_pixels

        vertical_borders = num_rows - 1
        total_vertical_overlap_pixels = overlap_pixels * vertical_borders
        adjusted_target_height = tile_height * num_rows - total_vertical_overlap_pixels

        if tile_width <= max_tile_width_size and adjusted_target_width <= target_width:
            if adjusted_target_width > best_adjusted_target_width:
                best_tile_width = tile_width
                best_adjusted_target_width = adjusted_target_width
                found_valid_solution = True

        tile_width -= reduction_step

    # Adjust Tile Height
    if found_valid_solution:
        tile_width = best_tile_width
        tile_height = max_tile_height_size

        while tile_height >= min_tile_dimension:
            horizontal_borders = num_cols - 1
            total_horizontal_overlap_pixels = overlap_pixels * horizontal_borders
            adjusted_target_width = tile_width * num_cols - total_horizontal_overlap_pixels

            vertical_borders = num_rows - 1
            total_vertical_overlap_pixels = overlap_pixels * vertical_borders
            adjusted_target_height = tile_height * num_rows - total_vertical_overlap_pixels

            if tile_height <= max_tile_height_size and adjusted_target_height <= target_height:
                if adjusted_target_height > best_adjusted_target_height:
                    best_tile_height = tile_height
                    best_adjusted_target_height = adjusted_target_height

            tile_height -= reduction_step

    new_target_height = best_adjusted_target_height
    new_target_width = best_adjusted_target_width
    tile_width = best_tile_width
    tile_height = best_tile_height

    print("--- TILE SIZE CALCULATED VALUES ---")
    print(f"Overlap pixels (requested): {overlap_pixels}")
    print(f"Tile Height (divisible by 8, max {max_tile_height_size}): {tile_height}")
    print(f"Tile Width (divisible by 8, max {max_tile_width_size}): {tile_width}")
    print(f"Number of Columns (horizontal tiles): {num_cols}")
    print(f"Number of Rows (vertical tiles): {num_rows}")
    print(f"Original Target Height: {target_height}")
    print(f"Original Target Width: {target_width}")
    print(f"New Target Height (total covered height): {new_target_height}")
    print(f"New Target Width (total covered width): {new_target_width}\n")

    return new_target_height, new_target_width, tile_height, tile_width

def do_calc_tile(target_height, target_width, overlap_pixels, max_tile_size):
    new_target_height, new_target_width, tile_height, tile_width = calc_tile_size(target_height, target_width, overlap_pixels, max_tile_size)
    return gr.update(value=tile_height), gr.update(value=tile_width), gr.update(value=new_target_height), gr.update(value=new_target_width)

def clear_result():
    return gr.update(value=None)

def run_for_examples(left_prompt, center_prompt, right_prompt, negative_prompt, left_gs, center_gs, right_gs, overlap_pixels, steps, generation_seed, scheduler, tile_height, tile_width, target_height, target_width, max_tile_width):
    return predict(left_prompt, center_prompt, right_prompt, negative_prompt, left_gs, center_gs, right_gs, overlap_pixels, steps, generation_seed, scheduler, tile_height, tile_width, target_height, target_width)

def randomize_seed_fn(generation_seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        generation_seed = random.randint(0, MAX_SEED)
    return generation_seed
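# Worked example: for a requested 1024x3840 image with overlap_pixels=160 and
# max_tile_width_size=1280, the width loop settles on tile_width=1280, since
# 1280 * 3 - 160 * 2 = 3520 <= 3840, and the height loop on tile_height=1024,
# since 1024 * 1 - 0 = 1024 <= 1024. The image is therefore generated at
# 1024x3520 rather than the requested 1024x3840.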
css = """
.gradio-container .fillable {
    width: 95% !important;
    max-width: unset !important;
}
"""

title = """<h1 align="center">Mixture-of-Diffusers for SDXL Tiling Pipeline</h1>
<div style="display: flex; flex-direction: column; justify-content: center; align-items: center; text-align: center; overflow:hidden;">
    <p>This project implements an SDXL pipeline based on the original project, <a href='https://github.com/albarji/mixture-of-diffusers'>Mixture-of-Diffusers</a>. For more information, see the <a href='https://arxiv.org/abs/2302.02412'>Paper</a>.</p>
    <div style="display: flex; flex-direction: row; gap: 20px; justify-content: center;"> <!-- Flex container for the last two paragraphs -->
        <p style="display: flex;gap: 6px;margin: 0; transform: translateY(40%);">
            <a href="https://huggingface.co/spaces/elismasilva/mixture-of-diffusers-sdxl-tiling?duplicate=true">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-md.svg" alt="Duplicate this Space">
            </a> to skip the queue and enjoy faster inference on the GPU of your choice
        </p>
        <p><a href='https://ko-fi.com/S6S71ACXMR' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi6.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a></p>
    </div>
</div>
"""
tips = """
### Method
The method proposed here strives to provide a better tool for image composition by using several diffusion processes in parallel, each configured with its own prompt and settings and focused on a particular region of the image. The diffusion processes are mixed in a way that harmonizes the generation, preventing "seam" effects in the generated image.
Using several diffusion processes in parallel also has practical advantages when generating very large images, as the GPU memory requirements are similar to those of generating an image the size of a single tile.
For practical demonstration purposes, this demo only covers image generation using 1x3 tiles. However, in the pipeline, you can freely increase the number of rows and columns as well as specify a row overlap.

### Tips
1. Describe the same environment for all image elements in your prompt. This helps to better harmonize the final image.
2. Keep the same stylization in all prompts.
3. Test different overlap sizes.
4. Test a few increments of the seed.
5. This may take a while.
6. Enjoy!
"""

about = r"""
📧 **Contact**
<br>
If you have any questions or suggestions, feel free to send your question to <b>[email protected]</b>.
"""

with gr.Blocks(css=css) as app:
    gr.Markdown(title)

    with gr.Row():
        with gr.Column(scale=7):
            generate_button = gr.Button("Generate")
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Left region")
                    left_prompt = gr.Textbox(lines=4, label="Prompt for left side of the image")
                    left_gs = gr.Slider(minimum=0, maximum=15, value=7, step=1, label="Left CFG scale")
                with gr.Column(scale=1):
                    gr.Markdown("### Center region")
                    center_prompt = gr.Textbox(lines=4, label="Prompt for the center of the image")
                    center_gs = gr.Slider(minimum=0, maximum=15, value=7, step=1, label="Center CFG scale")
                with gr.Column(scale=1):
                    gr.Markdown("### Right region")
                    right_prompt = gr.Textbox(lines=4, label="Prompt for the right side of the image")
                    right_gs = gr.Slider(minimum=0, maximum=15, value=7, step=1, label="Right CFG scale")
            with gr.Row():
                negative_prompt = gr.Textbox(
                    lines=2,
                    label="Negative prompt for the image",
                    value="nsfw, lowres, bad anatomy, bad hands, duplicate, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, blurry",
                )
            with gr.Row():
                result = gr.Image(
                    label="Generated Image",
                    show_label=True,
                    format="png",
                    interactive=False,
                    # allow_preview=True,
                    # preview=True,
                    scale=1,
                )
        with gr.Column():
            gr.Markdown(tips)
    with gr.Sidebar(label="Parameters", open=True):
        gr.Markdown("### General parameters")
        with gr.Row():
            height = gr.Slider(label="Height", value=1024, step=8, visible=True, minimum=512, maximum=1024)
            width = gr.Slider(label="Width", value=1280, step=8, visible=True, minimum=512, maximum=3840)
            overlap = gr.Slider(minimum=0, maximum=512, value=128, step=8, label="Tile Overlap")
            max_tile_size = gr.Dropdown(label="Max. Tile Size", choices=[1024, 1280], value=1280)
            calc_tile = gr.Button("Calculate Tile Size")
        with gr.Row():
            tile_height = gr.Textbox(label="Tile height", value=1024, interactive=False)
            tile_width = gr.Textbox(label="Tile width", value=1024, interactive=False)
        with gr.Row():
            new_target_height = gr.Textbox(label="New image height", value=1024, interactive=False)
            new_target_width = gr.Textbox(label="New image width", value=1024, interactive=False)
        with gr.Row():
            steps = gr.Slider(minimum=1, maximum=50, value=30, step=1, label="Inference steps")

            generation_seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=False)
        with gr.Row():
            scheduler = gr.Dropdown(
                label="Schedulers",
                choices=SCHEDULERS,
                value=SCHEDULERS[0],
            )
        with gr.Row():
            gr.Examples(
                examples=[
                    [
                        "Iron Man, repulsor rays blasting enemies in destroyed cityscape, sparks, energy trails, crumbling skyscrapers, smoke, debris, cinematic lighting, photorealistic, intense action. Focus: Iron Man.",
                        "Captain America charging forward, vibranium shield deflecting energy blasts in destroyed cityscape, collapsing buildings, rubble streets, battle-damaged suit, determined expression, distant explosions, cinematic composition, realistic rendering. Focus: Captain America.",
                        "Thor wielding Stormbreaker in destroyed cityscape, lightning crackling, powerful strike downwards, shattered buildings, burning debris, ground trembling, Asgardian armor, cinematic photography, realistic details. Focus: Thor.",
                        negative_prompt.value,
                        5, 5, 5,
                        160,
                        30,
                        1706922014,
                        "UniPCMultistepScheduler",
                        1024,
                        1280,
                        1024,
                        3840,
                        1024,
                    ],
                    [
                        "A charming house in the countryside, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
                        "A dirt road in the countryside crossing pastures, by jakub rozalski, sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
                        "An old and rusty giant robot lying on a dirt road, by jakub rozalski, dark sunset lighting, elegant, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
                        negative_prompt.value,
                        7, 7, 7,
                        256,
                        30,
                        297984183,
                        "DPMSolverMultistepScheduler-Karras-SDE",
                        1024,
                        1280,
                        1024,
                        3840,
                        1280,
                    ],
                    [
                        "Abstract decorative illustration, by joan miro and gustav klimt and marlina vera and loish, elegant, intricate, highly detailed, smooth, sharp focus, vibrant colors, artstation, stunning masterpiece",
                        "Abstract decorative illustration, by joan miro and gustav klimt and marlina vera and loish, elegant, intricate, highly detailed, smooth, sharp focus, vibrant colors, artstation, stunning masterpiece",
                        "Abstract decorative illustration, by joan miro and gustav klimt and marlina vera and loish, elegant, intricate, highly detailed, smooth, sharp focus, vibrant colors, artstation, stunning masterpiece",
                        negative_prompt.value,
                        7, 7, 7,
                        128,
                        30,
                        580541206,
                        "LMSDiscreteScheduler",
                        1024,
                        768,
                        1024,
                        2048,
                        1280,
                    ],
                    [
                        "Magical diagrams and runes written with chalk on a blackboard, elegant, intricate, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
                        "Magical diagrams and runes written with chalk on a blackboard, elegant, intricate, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
                        "Magical diagrams and runes written with chalk on a blackboard, elegant, intricate, highly detailed, smooth, sharp focus, artstation, stunning masterpiece",
                        negative_prompt.value,
                        9, 9, 9,
                        128,
                        30,
                        12591765619,
                        "LMSDiscreteScheduler",
                        1024,
                        768,
                        1024,
                        2048,
                        1280,
                    ],
                ],
                inputs=[left_prompt, center_prompt, right_prompt, negative_prompt, left_gs, center_gs, right_gs, overlap, steps, generation_seed, scheduler, tile_height, tile_width, height, width, max_tile_size],
                fn=run_for_examples,
                outputs=result,
                cache_examples=True,
            )

    event_calc_tile_size = {
        "fn": do_calc_tile,
        "inputs": [height, width, overlap, max_tile_size],
        "outputs": [tile_height, tile_width, new_target_height, new_target_width],
    }
    calc_tile.click(**event_calc_tile_size)

    generate_button.click(
        fn=clear_result,
        inputs=None,
        outputs=result,
    ).then(
        **event_calc_tile_size
    ).then(
        fn=randomize_seed_fn,
        inputs=[generation_seed, randomize_seed],
        outputs=generation_seed,
        queue=False,
        api_name=False,
    ).then(
        fn=predict,
        inputs=[left_prompt, center_prompt, right_prompt, negative_prompt, left_gs, center_gs, right_gs, overlap, steps, generation_seed, scheduler, tile_height, tile_width, new_target_height, new_target_width],
        outputs=result,
    )
    gr.Markdown(about)
app.launch(share=False)
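For reference, the calls that `predict` wires together can also be driven from a plain script. The sketch below mirrors app.py one-to-one and is a minimal, hedged example rather than part of the commit; the three prompts, the seed, and the output filename are illustrative placeholders, and the 3520-pixel width is the value calc_tile_size would report for a 3840-pixel target with 160 pixels of overlap.

import torch
from diffusers import AutoencoderKL
from mixture_tiling_sdxl import StableDiffusionXLTilingPipeline

# Same checkpoints the Space loads above.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLTilingPipeline.from_pretrained(
    "stablediffusionapi/yamermix-v8-vae",
    torch_dtype=torch.float16,
    vae=vae,
    use_safetensors=False,
).to("cuda")
pipe.enable_vae_tiling()
pipe.enable_vae_slicing()

# One row of three tiles; each tile gets its own prompt and guidance scale.
image = pipe(
    prompt=[["a snowy mountain peak", "a pine forest valley", "a frozen lake at dusk"]],  # placeholders
    negative_prompt="lowres, blurry",
    tile_height=1024,
    tile_width=1280,
    tile_row_overlap=0,
    tile_col_overlap=160,
    guidance_scale_tiles=[[7, 7, 7]],
    height=1024,
    width=3520,  # 1280 * 3 - 160 * 2
    target_size=(1024, 3520),
    generator=torch.Generator("cuda").manual_seed(0),
    num_inference_steps=30,
)["images"][0]
image.save("panorama.png")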
mixture_tiling_sdxl.py ADDED
@@ -0,0 +1,1198 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from enum import Enum
import inspect
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionModelWithProjection,
)

from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import (
    FromSingleFileMixin,
    IPAdapterMixin,
    StableDiffusionXLLoraLoaderMixin,
    TextualInversionLoaderMixin,
)
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    FusedAttnProcessor2_0,
    XFormersAttnProcessor,
)
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.schedulers import KarrasDiffusionSchedulers, LMSDiscreteScheduler
from diffusers.utils import (
    USE_PEFT_BACKEND,
    is_invisible_watermark_available,
    is_torch_xla_available,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
)
from diffusers.utils.torch_utils import randn_tensor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.pipelines.stable_diffusion_xl.pipeline_output import StableDiffusionXLPipelineOutput

try:
    from ligo.segments import segment
except ImportError:
    raise ImportError("Please install the ligo-segments package to use the mixture tiling pipeline")

if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionXLPipeline

        >>> pipe = StableDiffusionXLPipeline.from_pretrained(
        ...     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> prompt = "a photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
        ```
"""

def _tile2pixel_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
    """Given a tile's row and column numbers, returns the range of pixels affected by that tile in the overall image.

    Returns a tuple with:
        - Starting coordinate of rows in pixel space
        - Ending coordinate of rows in pixel space
        - Starting coordinate of columns in pixel space
        - Ending coordinate of columns in pixel space
    """
    px_row_init = 0 if tile_row == 0 else tile_row * (tile_height - tile_row_overlap)
    px_row_end = px_row_init + tile_height
    px_col_init = 0 if tile_col == 0 else tile_col * (tile_width - tile_col_overlap)
    px_col_end = px_col_init + tile_width
    return px_row_init, px_row_end, px_col_init, px_col_end


def _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end):
    """Translates coordinates in pixel space to coordinates in latent space"""
    return px_row_init // 8, px_row_end // 8, px_col_init // 8, px_col_end // 8


def _tile2latent_indices(tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap):
    """Given a tile's row and column numbers, returns the range of latents affected by that tile in the overall image.

    Returns a tuple with:
        - Starting coordinate of rows in latent space
        - Ending coordinate of rows in latent space
        - Starting coordinate of columns in latent space
        - Ending coordinate of columns in latent space
    """
    px_row_init, px_row_end, px_col_init, px_col_end = _tile2pixel_indices(
        tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
    )
    return _pixel2latent_indices(px_row_init, px_row_end, px_col_init, px_col_end)
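# For example, with tile_width=1280, tile_height=1024 and tile_col_overlap=160,
# tile (0, 1) covers pixel columns 1120..2400 (since 1 * (1280 - 160) = 1120)
# and pixel rows 0..1024, which maps to latent columns 140..300 and latent
# rows 0..128 after the // 8 reduction.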
def _tile2latent_exclusive_indices(
    tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap, rows, columns
):
    """Given a tile's row and column numbers, returns the range of latents affected only by that tile in the overall image.

    Returns a tuple with:
        - Starting coordinate of rows in latent space
        - Ending coordinate of rows in latent space
        - Starting coordinate of columns in latent space
        - Ending coordinate of columns in latent space
    """
    row_init, row_end, col_init, col_end = _tile2latent_indices(
        tile_row, tile_col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
    )
    row_segment = segment(row_init, row_end)
    col_segment = segment(col_init, col_end)
    # Iterate over the rest of tiles, clipping the region for the current tile
    for row in range(rows):
        for column in range(columns):
            if row != tile_row and column != tile_col:
                clip_row_init, clip_row_end, clip_col_init, clip_col_end = _tile2latent_indices(
                    row, column, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                )
                row_segment = row_segment - segment(clip_row_init, clip_row_end)
                col_segment = col_segment - segment(clip_col_init, clip_col_end)
    # return row_init, row_end, col_init, col_end
    return row_segment[0], row_segment[1], col_segment[0], col_segment[1]

# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    r"""
    Rescales `noise_cfg` tensor based on `guidance_rescale` to improve image quality and fix overexposure. Based on
    Section 3.4 from [Common Diffusion Noise Schedules and Sample Steps are
    Flawed](https://arxiv.org/pdf/2305.08891.pdf).

    Args:
        noise_cfg (`torch.Tensor`):
            The predicted noise tensor for the guided diffusion process.
        noise_pred_text (`torch.Tensor`):
            The predicted noise tensor for the text-guided diffusion process.
        guidance_rescale (`float`, *optional*, defaults to 0.0):
            A rescale factor applied to the noise predictions.

    Returns:
        noise_cfg (`torch.Tensor`): The rescaled noise prediction tensor.
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """

    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
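# For example, retrieve_timesteps(scheduler, num_inference_steps=30,
# device="cuda") returns the scheduler's regular 30-step schedule, while
# retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249], device="cuda")
# overrides the spacing strategy (when the scheduler supports custom
# timesteps) and returns num_inference_steps=4.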
242 |
+
class StableDiffusionXLTilingPipeline(
|
243 |
+
DiffusionPipeline,
|
244 |
+
StableDiffusionMixin,
|
245 |
+
FromSingleFileMixin,
|
246 |
+
StableDiffusionXLLoraLoaderMixin,
|
247 |
+
TextualInversionLoaderMixin,
|
248 |
+
IPAdapterMixin,
|
249 |
+
):
|
250 |
+
r"""
|
251 |
+
Pipeline for text-to-image generation using Stable Diffusion XL.
|
252 |
+
|
253 |
+
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
|
254 |
+
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
|
255 |
+
|
256 |
+
The pipeline also inherits the following loading methods:
|
257 |
+
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
|
258 |
+
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
|
259 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
|
260 |
+
- [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
|
261 |
+
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
|
262 |
+
|
263 |
+
Args:
|
264 |
+
vae ([`AutoencoderKL`]):
|
265 |
+
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
|
266 |
+
text_encoder ([`CLIPTextModel`]):
|
267 |
+
Frozen text-encoder. Stable Diffusion XL uses the text portion of
|
268 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
|
269 |
+
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
|
270 |
+
text_encoder_2 ([` CLIPTextModelWithProjection`]):
|
271 |
+
Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
|
272 |
+
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
|
273 |
+
specifically the
|
274 |
+
[laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
|
275 |
+
variant.
|
276 |
+
tokenizer (`CLIPTokenizer`):
|
277 |
+
Tokenizer of class
|
278 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
279 |
+
tokenizer_2 (`CLIPTokenizer`):
|
280 |
+
Second Tokenizer of class
|
281 |
+
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
|
282 |
+
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
|
283 |
+
scheduler ([`SchedulerMixin`]):
|
284 |
+
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
|
285 |
+
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
|
286 |
+
force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`):
|
287 |
+
Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
|
288 |
+
`stabilityai/stable-diffusion-xl-base-1-0`.
|
289 |
+
add_watermarker (`bool`, *optional*):
|
290 |
+
Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to
|
291 |
+
watermark output images. If not defined, it will default to True if the package is installed, otherwise no
|
292 |
+
watermarker will be used.
|
293 |
+
"""
|
294 |
+
|
295 |
+
model_cpu_offload_seq = "text_encoder->text_encoder_2->image_encoder->unet->vae"
|
296 |
+
_optional_components = [
|
297 |
+
"tokenizer",
|
298 |
+
"tokenizer_2",
|
299 |
+
"text_encoder",
|
300 |
+
"text_encoder_2",
|
301 |
+
"image_encoder",
|
302 |
+
"feature_extractor",
|
303 |
+
]
|
304 |
+
|
305 |
+
def __init__(
|
306 |
+
self,
|
307 |
+
vae: AutoencoderKL,
|
308 |
+
text_encoder: CLIPTextModel,
|
309 |
+
text_encoder_2: CLIPTextModelWithProjection,
|
310 |
+
tokenizer: CLIPTokenizer,
|
311 |
+
tokenizer_2: CLIPTokenizer,
|
312 |
+
unet: UNet2DConditionModel,
|
313 |
+
scheduler: KarrasDiffusionSchedulers,
|
314 |
+
image_encoder: CLIPVisionModelWithProjection = None,
|
315 |
+
feature_extractor: CLIPImageProcessor = None,
|
316 |
+
force_zeros_for_empty_prompt: bool = True,
|
317 |
+
add_watermarker: Optional[bool] = None,
|
318 |
+
):
|
319 |
+
super().__init__()
|
320 |
+
|
321 |
+
self.register_modules(
|
322 |
+
vae=vae,
|
323 |
+
text_encoder=text_encoder,
|
324 |
+
text_encoder_2=text_encoder_2,
|
325 |
+
tokenizer=tokenizer,
|
326 |
+
tokenizer_2=tokenizer_2,
|
327 |
+
unet=unet,
|
328 |
+
scheduler=scheduler,
|
329 |
+
image_encoder=image_encoder,
|
330 |
+
feature_extractor=feature_extractor,
|
331 |
+
)
|
332 |
+
self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
|
333 |
+
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
|
334 |
+
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
|
335 |
+
|
336 |
+
self.default_sample_size = (
|
337 |
+
self.unet.config.sample_size
|
338 |
+
if hasattr(self, "unet") and self.unet is not None and hasattr(self.unet.config, "sample_size")
|
339 |
+
else 128
|
340 |
+
)
|
341 |
+
|
342 |
+
add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()
|
343 |
+
|
344 |
+
if add_watermarker:
|
345 |
+
self.watermark = StableDiffusionXLWatermarker()
|
346 |
+
else:
|
347 |
+
self.watermark = None
|
348 |
+
|
349 |
+
class SeedTilesMode(Enum):
|
350 |
+
"""Modes in which the latents of a particular tile can be re-seeded"""
|
351 |
+
|
352 |
+
FULL = "full"
|
353 |
+
EXCLUSIVE = "exclusive"
|
354 |
+
|
355 |
+
def encode_prompt(
|
356 |
+
self,
|
357 |
+
prompt: str,
|
358 |
+
prompt_2: Optional[str] = None,
|
359 |
+
device: Optional[torch.device] = None,
|
360 |
+
num_images_per_prompt: int = 1,
|
361 |
+
do_classifier_free_guidance: bool = True,
|
362 |
+
negative_prompt: Optional[str] = None,
|
363 |
+
negative_prompt_2: Optional[str] = None,
|
364 |
+
prompt_embeds: Optional[torch.Tensor] = None,
|
365 |
+
negative_prompt_embeds: Optional[torch.Tensor] = None,
|
366 |
+
pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
367 |
+
negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
|
368 |
+
lora_scale: Optional[float] = None,
|
369 |
+
clip_skip: Optional[int] = None,
|
370 |
+
):
|
371 |
+
r"""
|
372 |
+
Encodes the prompt into text encoder hidden states.
|
373 |
+
|
374 |
+
Args:
|
375 |
+
prompt (`str` or `List[str]`, *optional*):
|
376 |
+
prompt to be encoded
|
377 |
+
prompt_2 (`str` or `List[str]`, *optional*):
|
378 |
+
The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
|
379 |
+
used in both text-encoders
|
380 |
+
device: (`torch.device`):
|
381 |
+
torch device
|
382 |
+
num_images_per_prompt (`int`):
|
383 |
+
number of images that should be generated per prompt
|
384 |
+
do_classifier_free_guidance (`bool`):
|
385 |
+
whether to use classifier free guidance or not
|
386 |
+
negative_prompt (`str` or `List[str]`, *optional*):
|
387 |
+
The prompt or prompts not to guide the image generation. If not defined, one has to pass
|
388 |
+
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
|
389 |
+
less than `1`).
|
390 |
+
negative_prompt_2 (`str` or `List[str]`, *optional*):
|
391 |
+
The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
|
392 |
+
`text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
|
393 |
+
prompt_embeds (`torch.Tensor`, *optional*):
|
394 |
+
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
395 |
+
provided, text embeddings will be generated from `prompt` input argument.
|
396 |
+
negative_prompt_embeds (`torch.Tensor`, *optional*):
|
397 |
+
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
398 |
+
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
399 |
+
argument.
|
400 |
+
pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
401 |
+
Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
|
402 |
+
If not provided, pooled text embeddings will be generated from `prompt` input argument.
|
403 |
+
negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
|
404 |
+
Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
405 |
+
weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
|
406 |
+
input argument.
|
407 |
+
lora_scale (`float`, *optional*):
|
408 |
+
A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
|
409 |
+
clip_skip (`int`, *optional*):
|
410 |
+
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
|
411 |
+
the output of the pre-final layer will be used for computing the prompt embeddings.
|
412 |
+
"""
|
413 |
+
device = device or self._execution_device
|
414 |
+
|
415 |
+
# set lora scale so that monkey patched LoRA
|
416 |
+
# function of text encoder can correctly access it
|
417 |
+
if lora_scale is not None and isinstance(self, StableDiffusionXLLoraLoaderMixin):
|
418 |
+
self._lora_scale = lora_scale
|
419 |
+
|
420 |
+
# dynamically adjust the LoRA scale
|
421 |
+
if self.text_encoder is not None:
|
422 |
+
if not USE_PEFT_BACKEND:
|
423 |
+
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
|
424 |
+
else:
|
425 |
+
scale_lora_layers(self.text_encoder, lora_scale)
|
426 |
+
|
427 |
+
if self.text_encoder_2 is not None:
|
428 |
+
if not USE_PEFT_BACKEND:
|
429 |
+
adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale)
|
430 |
+
else:
|
431 |
+
scale_lora_layers(self.text_encoder_2, lora_scale)
|
432 |
+
|
433 |
+
prompt = [prompt] if isinstance(prompt, str) else prompt
|
434 |
+
|
435 |
+
if prompt is not None:
|
436 |
+
batch_size = len(prompt)
|
437 |
+
else:
|
438 |
+
batch_size = prompt_embeds.shape[0]
|
439 |
+
|
440 |
+
# Define tokenizers and text encoders
|
441 |
+
tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
|
442 |
+
text_encoders = (
|
443 |
+
[self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
|
444 |
+
)
|
445 |
+
|
446 |
+
if prompt_embeds is None:
|
447 |
+
prompt_2 = prompt_2 or prompt
|
448 |
+
prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
|
449 |
+
|
450 |
+
# textual inversion: process multi-vector tokens if necessary
|
451 |
+
prompt_embeds_list = []
|
452 |
+
prompts = [prompt, prompt_2]
|
453 |
+
for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
|
454 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
455 |
+
prompt = self.maybe_convert_prompt(prompt, tokenizer)
|
456 |
+
|
457 |
+
text_inputs = tokenizer(
|
458 |
+
prompt,
|
459 |
+
padding="max_length",
|
460 |
+
max_length=tokenizer.model_max_length,
|
461 |
+
truncation=True,
|
462 |
+
return_tensors="pt",
|
463 |
+
)
|
464 |
+
|
465 |
+
text_input_ids = text_inputs.input_ids
|
466 |
+
untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
|
467 |
+
|
468 |
+
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
|
469 |
+
text_input_ids, untruncated_ids
|
470 |
+
):
|
471 |
+
removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
|
472 |
+
logger.warning(
|
473 |
+
"The following part of your input was truncated because CLIP can only handle sequences up to"
|
474 |
+
f" {tokenizer.model_max_length} tokens: {removed_text}"
|
475 |
+
)
|
476 |
+
|
477 |
+
prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
|
478 |
+
|
479 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
480 |
+
if pooled_prompt_embeds is None and prompt_embeds[0].ndim == 2:
|
481 |
+
pooled_prompt_embeds = prompt_embeds[0]
|
482 |
+
|
483 |
+
if clip_skip is None:
|
484 |
+
prompt_embeds = prompt_embeds.hidden_states[-2]
|
485 |
+
else:
|
486 |
+
# "2" because SDXL always indexes from the penultimate layer.
|
487 |
+
prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
|
488 |
+
|
489 |
+
prompt_embeds_list.append(prompt_embeds)
|
490 |
+
|
491 |
+
prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)
|
492 |
+
|
493 |
+
# get unconditional embeddings for classifier free guidance
|
494 |
+
zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
|
495 |
+
if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
|
496 |
+
negative_prompt_embeds = torch.zeros_like(prompt_embeds)
|
497 |
+
negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
|
498 |
+
elif do_classifier_free_guidance and negative_prompt_embeds is None:
|
499 |
+
negative_prompt = negative_prompt or ""
|
500 |
+
negative_prompt_2 = negative_prompt_2 or negative_prompt
|
501 |
+
|
502 |
+
# normalize str to list
|
503 |
+
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
|
504 |
+
negative_prompt_2 = (
|
505 |
+
batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
|
506 |
+
)
|
507 |
+
|
508 |
+
uncond_tokens: List[str]
|
509 |
+
if prompt is not None and type(prompt) is not type(negative_prompt):
|
510 |
+
raise TypeError(
|
511 |
+
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
|
512 |
+
f" {type(prompt)}."
|
513 |
+
)
|
514 |
+
elif batch_size != len(negative_prompt):
|
515 |
+
raise ValueError(
|
516 |
+
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
|
517 |
+
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
|
518 |
+
" the batch size of `prompt`."
|
519 |
+
)
|
520 |
+
else:
|
521 |
+
uncond_tokens = [negative_prompt, negative_prompt_2]
|
522 |
+
|
523 |
+
negative_prompt_embeds_list = []
|
524 |
+
for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
|
525 |
+
if isinstance(self, TextualInversionLoaderMixin):
|
526 |
+
negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)
|
527 |
+
|
528 |
+
max_length = prompt_embeds.shape[1]
|
529 |
+
uncond_input = tokenizer(
|
530 |
+
negative_prompt,
|
531 |
+
padding="max_length",
|
532 |
+
max_length=max_length,
|
533 |
+
truncation=True,
|
534 |
+
return_tensors="pt",
|
535 |
+
)
|
536 |
+
|
537 |
+
negative_prompt_embeds = text_encoder(
|
538 |
+
uncond_input.input_ids.to(device),
|
539 |
+
output_hidden_states=True,
|
540 |
+
)
|
541 |
+
|
542 |
+
# We are only ALWAYS interested in the pooled output of the final text encoder
|
543 |
+
if negative_pooled_prompt_embeds is None and negative_prompt_embeds[0].ndim == 2:
|
544 |
+
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)

        if self.text_encoder_2 is not None:
            prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        else:
            prompt_embeds = prompt_embeds.to(dtype=self.unet.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            if self.text_encoder_2 is not None:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            else:
                negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.unet.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        if self.text_encoder is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder, lora_scale)

        if self.text_encoder_2 is not None:
            if isinstance(self, StableDiffusionXLLoraLoaderMixin) and USE_PEFT_BACKEND:
                # Retrieve the original scale by scaling back the LoRA layers
                unscale_lora_layers(self.text_encoder_2, lora_scale)

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs
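
    # For example: a DDIMScheduler's `step()` accepts both `eta` and `generator`, so the
    # dict above would be {"eta": eta, "generator": generator}; an LMSDiscreteScheduler's
    # `step()` accepts neither, so it would stay empty.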

    def check_inputs(
        self,
        prompt,
        height,
        width,
        grid_cols,
        seed_tiles_mode,
        tiles_mode,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if not isinstance(prompt, list) or not all(isinstance(row, list) for row in prompt):
            raise ValueError(f"`prompt` has to be a list of lists but is {type(prompt)}")

        if not all(len(row) == grid_cols for row in prompt):
            raise ValueError("All prompt rows must have the same number of prompt columns")

        if not isinstance(seed_tiles_mode, str) and (
            not isinstance(seed_tiles_mode, list) or not all(isinstance(row, list) for row in seed_tiles_mode)
        ):
            raise ValueError(f"`seed_tiles_mode` has to be a string or list of lists but is {type(seed_tiles_mode)}")

        if any(mode not in tiles_mode for row in seed_tiles_mode for mode in row):
            raise ValueError(f"Seed tiles mode must be one of {tiles_mode}")
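
    # For example, a valid `prompt` for a 2x2 grid is
    #     [["a sunny sky", "a mountain peak"], ["a meadow", "a river"]]
    # (hypothetical prompts), where every row has grid_cols == 2 entries.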

    def _get_add_time_ids(
        self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
    ):
        add_time_ids = list(original_size + crops_coords_top_left + target_size)

        passed_add_embed_dim = (
            self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
        )
        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features

        if expected_add_embed_dim != passed_add_embed_dim:
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
            )

        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
        return add_time_ids
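
    # For example, original_size=(1024, 1024), crops_coords_top_left=(0, 0) and
    # target_size=(1024, 1024) yield the tensor [[1024, 1024, 0, 0, 1024, 1024]],
    # SDXL's six micro-conditioning values.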

    def _gaussian_weights(self, tile_width, tile_height, nbatches, device, dtype):
        """Generates a gaussian mask of weights for tile contributions"""
        import numpy as np
        from numpy import exp, pi, sqrt

        latent_width = tile_width // 8
        latent_height = tile_height // 8

        var = 0.01
        midpoint = (latent_width - 1) / 2  # -1 because index goes from 0 to latent_width - 1
        x_probs = [
            exp(-(x - midpoint) * (x - midpoint) / (latent_width * latent_width) / (2 * var)) / sqrt(2 * pi * var)
            for x in range(latent_width)
        ]
        midpoint = latent_height / 2
        y_probs = [
            exp(-(y - midpoint) * (y - midpoint) / (latent_height * latent_height) / (2 * var)) / sqrt(2 * pi * var)
            for y in range(latent_height)
        ]

        weights_np = np.outer(y_probs, x_probs)
        weights_torch = torch.tensor(weights_np, device=device)
        weights_torch = weights_torch.to(dtype)
        return torch.tile(weights_torch, (nbatches, self.unet.config.in_channels, 1, 1))
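
    # For the default 1024x1024 tiles this produces a 128x128 latent mask that peaks at
    # the tile centre and decays towards the edges, tiled to shape
    # (nbatches, unet.config.in_channels, 128, 128), e.g. (1, 4, 128, 128) for SDXL.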

    def upcast_vae(self):
        dtype = self.vae.dtype
        self.vae.to(dtype=torch.float32)
        use_torch_2_0_or_xformers = isinstance(
            self.vae.decoder.mid_block.attentions[0].processor,
            (
                AttnProcessor2_0,
                XFormersAttnProcessor,
                FusedAttnProcessor2_0,
            ),
        )
        # if xformers or torch_2_0 is used attention block does not need
        # to be in float32 which can save lots of memory
        if use_torch_2_0_or_xformers:
            self.vae.post_quant_conv.to(dtype)
            self.vae.decoder.conv_in.to(dtype)
            self.vae.decoder.mid_block.to(dtype)

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(
        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
    ) -> torch.Tensor:
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
            embedding_dim (`int`, *optional*, defaults to 512):
                Dimension of the embeddings to generate.
            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
                Data type of the generated embeddings.

        Returns:
            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb
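
    # For w = torch.tensor([7.5]) and the default embedding_dim=512, this returns a
    # (1, 512) sinusoidal embedding of the guidance scale; it is only relevant for UNets
    # with `time_cond_proj_dim` set (guidance-distilled models), the same condition
    # checked in `do_classifier_free_guidance` below.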

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Optional[List[List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Optional[Tuple[int, int]] = None,
        negative_original_size: Optional[Tuple[int, int]] = None,
        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
        negative_target_size: Optional[Tuple[int, int]] = None,
        clip_skip: Optional[int] = None,
        tile_height: Optional[int] = 1024,
        tile_width: Optional[int] = 1024,
        tile_row_overlap: Optional[int] = 128,
        tile_col_overlap: Optional[int] = 128,
        guidance_scale_tiles: Optional[List[List[float]]] = None,
        seed_tiles: Optional[List[List[int]]] = None,
        seed_tiles_mode: Optional[Union[str, List[List[str]]]] = "full",
        seed_reroll_regions: Optional[List[Tuple[int, int, int, int, int]]] = None,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`List[List[str]]`, *optional*):
                The prompts to guide the image generation, arranged as a grid: one inner list of column prompts per
                grid row.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. This is set to 1024 by default for the best results.
                Anything below 512 pixels won't work well for
                [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0)
                and checkpoints that are not specifically fine-tuned on low resolutions.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
                of a plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the
                position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by
                setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section
                2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                For most cases, `target_size` should be set to the desired height and width of the generated image.
                If not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained
                in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a specific image resolution. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a target image resolution. It should be the
                same as `target_size` in most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            tile_height (`int`, *optional*, defaults to 1024):
                Height of each grid tile in pixels.
            tile_width (`int`, *optional*, defaults to 1024):
                Width of each grid tile in pixels.
            tile_row_overlap (`int`, *optional*, defaults to 128):
                Number of overlapping pixels between tiles in consecutive rows.
            tile_col_overlap (`int`, *optional*, defaults to 128):
                Number of overlapping pixels between tiles in consecutive columns.
            guidance_scale_tiles (`List[List[float]]`, *optional*):
                Specific weights for classifier-free guidance in each tile. If `None`, the value provided in
                `guidance_scale` will be used.
            seed_tiles (`List[List[int]]`, *optional*):
                Specific seeds for the initialization latents in each tile. These will override the latents generated
                for the whole canvas using the standard `generator` parameter.
            seed_tiles_mode (`Union[str, List[List[str]]]`, *optional*, defaults to `"full"`):
                Mode for seeding tiles, can be `"full"` or `"exclusive"`. If `"full"`, all the latents affected by
                the tile will be overridden. If `"exclusive"`, only the latents that are exclusively affected by this
                tile (and no other tiles) will be overridden.
            seed_reroll_regions (`List[Tuple[int, int, int, int, int]]`, *optional*):
                A list of tuples in the form of `(start_row, end_row, start_column, end_column, seed)` defining
                regions in pixel space for which the latents will be overridden using the given seed. Takes priority
                over `seed_tiles`.
            **kwargs (`Dict[str, Any]`, *optional*):
                Additional optional keyword arguments to be passed to the `unet.__call__` and `scheduler.step`
                functions.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
                [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True,
                otherwise a `tuple`. When returning a tuple, the first element is a list with the generated images.
        """

        # 0. Default height and width to unet
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        self._guidance_scale = guidance_scale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs
        self._interrupt = False

        grid_rows = len(prompt)
        grid_cols = len(prompt[0])

        tiles_mode = [mode.value for mode in self.SeedTilesMode]

        if isinstance(seed_tiles_mode, str):
            seed_tiles_mode = [[seed_tiles_mode for _ in range(len(row))] for row in prompt]

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            grid_cols,
            seed_tiles_mode,
            tiles_mode,
        )

        if seed_reroll_regions is None:
            seed_reroll_regions = []

        batch_size = 1

        device = self._execution_device

        # update height and width to the size of the full tile grid, accounting for the overlap between neighbouring tiles
        height = tile_height + (grid_rows - 1) * (tile_height - tile_row_overlap)
        width = tile_width + (grid_cols - 1) * (tile_width - tile_col_overlap)
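
        # For example, a 1x3 grid of 1024px tiles with tile_col_overlap=128 gives
        # width = 1024 + 2 * (1024 - 128) = 2816 and height = 1024: the canvas grows by
        # (tile size - overlap) for every extra tile in a row or column.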

        # 2. Encode input prompt
        lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )
        text_embeddings = [
            [
                self.encode_prompt(
                    prompt=col,
                    device=device,
                    num_images_per_prompt=num_images_per_prompt,
                    do_classifier_free_guidance=self.do_classifier_free_guidance,
                    negative_prompt=negative_prompt,
                    prompt_embeds=None,
                    negative_prompt_embeds=None,
                    pooled_prompt_embeds=None,
                    negative_pooled_prompt_embeds=None,
                    lora_scale=lora_scale,
                    clip_skip=self.clip_skip,
                )
                for col in row
            ]
            for row in prompt
        ]
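
        # text_embeddings[row][col] holds the 4-tuple returned by encode_prompt for that
        # tile: (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds,
        # negative_pooled_prompt_embeds), consumed again in step 6 below.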

        # 3. Prepare latents
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        dtype = text_embeddings[0][0][0].dtype
        latents = randn_tensor(latents_shape, generator=generator, device=device, dtype=dtype)

        # 3.1 overwrite latents for specific tiles if provided
        if seed_tiles is not None:
            for row in range(grid_rows):
                for col in range(grid_cols):
                    if (seed_tile := seed_tiles[row][col]) is not None:
                        mode = seed_tiles_mode[row][col]
                        if mode == self.SeedTilesMode.FULL.value:
                            row_init, row_end, col_init, col_end = _tile2latent_indices(
                                row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                            )
                        else:
                            row_init, row_end, col_init, col_end = _tile2latent_exclusive_indices(
                                row,
                                col,
                                tile_width,
                                tile_height,
                                tile_row_overlap,
                                tile_col_overlap,
                                grid_rows,
                                grid_cols,
                            )
                        tile_generator = torch.Generator(device).manual_seed(seed_tile)
                        tile_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
                        latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
                            tile_shape, generator=tile_generator, device=device
                        )
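
        # The _tile2latent* helpers map pixel-space tile coordinates into latent space by
        # dividing by the VAE downscale factor of 8, so a 1024x1024 tile covers a 128x128
        # latent window.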

        # 3.2 overwrite again for seed reroll regions
        for row_init, row_end, col_init, col_end, seed_reroll in seed_reroll_regions:
            row_init, row_end, col_init, col_end = _pixel2latent_indices(
                row_init, row_end, col_init, col_end
            )  # to latent space coordinates
            reroll_generator = torch.Generator(device).manual_seed(seed_reroll)
            region_shape = (latents_shape[0], latents_shape[1], row_end - row_init, col_end - col_init)
            latents[:, :, row_init:row_end, col_init:col_end] = torch.randn(
                region_shape, generator=reroll_generator, device=device
            )

        # 4. Prepare timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, None, None, **extra_set_kwargs
        )

        # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents * self.scheduler.sigmas[0]

        # 5. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6. Prepare added time ids & embeddings
        # text_embeddings order: prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
        embeddings_and_added_time = []
        for row in range(grid_rows):
            addition_embed_type_row = []
            for col in range(grid_cols):
                # extract generated values
                prompt_embeds = text_embeddings[row][col][0]
                negative_prompt_embeds = text_embeddings[row][col][1]
                pooled_prompt_embeds = text_embeddings[row][col][2]
                negative_pooled_prompt_embeds = text_embeddings[row][col][3]

                add_text_embeds = pooled_prompt_embeds
                if self.text_encoder_2 is None:
                    text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
                else:
                    text_encoder_projection_dim = self.text_encoder_2.config.projection_dim
                add_time_ids = self._get_add_time_ids(
                    original_size,
                    crops_coords_top_left,
                    target_size,
                    dtype=prompt_embeds.dtype,
                    text_encoder_projection_dim=text_encoder_projection_dim,
                )
                if negative_original_size is not None and negative_target_size is not None:
                    negative_add_time_ids = self._get_add_time_ids(
                        negative_original_size,
                        negative_crops_coords_top_left,
                        negative_target_size,
                        dtype=prompt_embeds.dtype,
                        text_encoder_projection_dim=text_encoder_projection_dim,
                    )
                else:
                    negative_add_time_ids = add_time_ids

                if self.do_classifier_free_guidance:
                    prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
                    add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
                    add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)

                prompt_embeds = prompt_embeds.to(device)
                add_text_embeds = add_text_embeds.to(device)
                add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)
                addition_embed_type_row.append((prompt_embeds, add_text_embeds, add_time_ids))
            embeddings_and_added_time.append(addition_embed_type_row)

        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        # 7. Mask for tile weights strength
        tile_weights = self._gaussian_weights(tile_width, tile_height, batch_size, device, torch.float32)

        # 8. Denoising loop
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # Diffuse each tile
                noise_preds = []
                for row in range(grid_rows):
                    noise_preds_row = []
                    for col in range(grid_cols):
                        if self.interrupt:
                            continue
                        px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
                            row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                        )
                        tile_latents = latents[:, :, px_row_init:px_row_end, px_col_init:px_col_end]
                        # expand the latents if we are doing classifier free guidance
                        latent_model_input = (
                            torch.cat([tile_latents] * 2) if self.do_classifier_free_guidance else tile_latents
                        )
                        latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                        # predict the noise residual
                        added_cond_kwargs = {
                            "text_embeds": embeddings_and_added_time[row][col][1],
                            "time_ids": embeddings_and_added_time[row][col][2],
                        }
                        with torch.amp.autocast(device.type, dtype=dtype, enabled=dtype != self.unet.dtype):
                            noise_pred = self.unet(
                                latent_model_input,
                                t,
                                encoder_hidden_states=embeddings_and_added_time[row][col][0],
                                cross_attention_kwargs=self.cross_attention_kwargs,
                                added_cond_kwargs=added_cond_kwargs,
                                return_dict=False,
                            )[0]

                        # perform guidance
                        if self.do_classifier_free_guidance:
                            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                            guidance = (
                                guidance_scale
                                if guidance_scale_tiles is None or guidance_scale_tiles[row][col] is None
                                else guidance_scale_tiles[row][col]
                            )
                            noise_pred = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond)
                        noise_preds_row.append(noise_pred)
                    noise_preds.append(noise_preds_row)
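
                # Per-tile guidance: with e.g. guidance_scale_tiles=[[None, 9.0]] on a 1x2
                # grid, the right tile uses a CFG scale of 9.0 while the left tile falls
                # back to the global `guidance_scale` (hypothetical values).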

                # Stitch noise predictions for all tiles
                noise_pred = torch.zeros(latents.shape, device=device)
                contributors = torch.zeros(latents.shape, device=device)

                # Add each tile contribution to overall latents
                for row in range(grid_rows):
                    for col in range(grid_cols):
                        px_row_init, px_row_end, px_col_init, px_col_end = _tile2latent_indices(
                            row, col, tile_width, tile_height, tile_row_overlap, tile_col_overlap
                        )
                        noise_pred[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += (
                            noise_preds[row][col] * tile_weights
                        )
                        contributors[:, :, px_row_init:px_row_end, px_col_init:px_col_end] += tile_weights

                # Average overlapping areas with more than 1 contributor
                noise_pred /= contributors
                noise_pred = noise_pred.to(dtype)
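
                # Each latent position is a weighted average, sum(w_i * pred_i) / sum(w_i)
                # over every tile i covering it, so predictions blend smoothly where the
                # gaussian masks overlap.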

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                # update progress bar
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if output_type != "latent":
            # make sure the VAE is in float32 mode, as it overflows in float16
            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

            if needs_upcasting:
                self.upcast_vae()
                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
            elif latents.dtype != self.vae.dtype:
                if torch.backends.mps.is_available():
                    # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                    self.vae = self.vae.to(latents.dtype)

            # unscale/denormalize the latents
            # denormalize with the mean and std if available and not None
            has_latents_mean = hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None
            has_latents_std = hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None
            if has_latents_mean and has_latents_std:
                latents_mean = (
                    torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1).to(latents.device, latents.dtype)
                )
                latents_std = (
                    torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1).to(latents.device, latents.dtype)
                )
                latents = latents * latents_std / self.vae.config.scaling_factor + latents_mean
            else:
                latents = latents / self.vae.config.scaling_factor

            image = self.vae.decode(latents, return_dict=False)[0]

            # cast back to fp16 if needed
            if needs_upcasting:
                self.vae.to(dtype=torch.float16)
        else:
            image = latents

        if output_type != "latent":
            # apply watermark if available
            if self.watermark is not None:
                image = self.watermark.apply_watermark(image)

            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)

requirements.txt
ADDED
@@ -0,0 +1,10 @@
torch
spaces
scipy
gradio==5.15.0
numpy==1.26.4
transformers
accelerate
diffusers
fastapi>=0.115.2
ligo-segments