Commit dbdf7bd · verified · 1 parent: e91bdef
diffusers-benchmarking-bot committed

Upload folder using huggingface_hub

main/README.md CHANGED
@@ -3379,6 +3379,20 @@ best quality, 3persons in garden, a boy blue shirt BREAK
 best quality, 3persons in garden, an old man red suit
 ```
 
+### Use base prompt
+
+You can use a base prompt to apply a prompt to all areas. Set the base prompt by adding `ADDBASE` at the end of it. Base prompts can also be combined with common prompts, but the base prompt must be specified first.
+
+```
+2d animation style ADDBASE
+masterpiece, high quality ADDCOMM
+(blue sky)++ BREAK
+green hair twintail BREAK
+book shelf BREAK
+messy desk BREAK
+orange++ dress and sofa
+```
+
 ### Negative prompt
 
 Negative prompts are equally effective across all regions, but it is also possible to set region-specific negative prompts. The number of BREAKs must match the number in the positive prompt; if the counts do not match, the negative prompt is used without being divided into regions.
@@ -3409,6 +3423,7 @@ pipe(prompt=prompt, rp_args=rp_args)
 ### Optional Parameters
 
 - `save_mask`: In `Prompt` mode, choose whether to output the generated mask along with the image. The default is `False`.
+- `base_ratio`: Used with `ADDBASE`. Sets the ratio of the base prompt; if `base_ratio` is set to 0.2, the resulting image will consist of `20% * BASE_PROMPT + 80% * REGION_PROMPT`.
 
 The Pipeline supports `compel` syntax. Input prompts using the `compel` structure will be automatically applied and processed.
 
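For context, the README additions above can be exercised end to end roughly as follows. This is an illustrative sketch, not part of the commit: it assumes the file is loaded as the `regional_prompting_stable_diffusion` community pipeline via `custom_pipeline`, and the model id, `mode`/`div` values, and prompts are placeholders.

```python
import torch
from diffusers import DiffusionPipeline

# Illustrative only: the model id and rp_args values below are placeholders.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="regional_prompting_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Base prompt first (ADDBASE), then a common prompt (ADDCOMM),
# then one prompt per region, separated by BREAK.
prompt = """2d animation style ADDBASE
masterpiece, high quality ADDCOMM
green hair twintail BREAK
book shelf BREAK
messy desk"""

rp_args = {
    "mode": "rows",       # assumed: split the canvas into horizontal regions
    "div": "1;1;1",       # assumed: three equal rows, one per regional prompt
    "base_ratio": "0.2",  # 20%*BASE_PROMPT + 80%*REGION_PROMPT
}

image = pipe(prompt=prompt, rp_args=rp_args).images[0]
image.save("regional_base_prompt.png")
```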
main/regional_prompting_stable_diffusion.py CHANGED
@@ -3,13 +3,12 @@ from typing import Dict, Optional
 
 import torch
 import torchvision.transforms.functional as FF
-from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
+from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
 
 from diffusers import StableDiffusionPipeline
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from diffusers.schedulers import KarrasDiffusionSchedulers
-from diffusers.utils import USE_PEFT_BACKEND
 
 
 try:
@@ -17,6 +16,7 @@ try:
 except ImportError:
     Compel = None
 
+KBASE = "ADDBASE"
 KCOMM = "ADDCOMM"
 KBRK = "BREAK"
 
@@ -34,6 +34,11 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
 
     Optional
     rp_args["save_mask"]: True/False (save masks in prompt mode)
+    rp_args["power"]: int (power for attention maps in prompt mode)
+    rp_args["base_ratio"]:
+        float (Sets the ratio of the base prompt)
+        ex) 0.2 (20%*BASE_PROMPT + 80%*REGION_PROMPT)
+        [Use base prompt](https://github.com/hako-mikan/sd-webui-regional-prompter?tab=readme-ov-file#use-base-prompt)
 
     Pipeline for text-to-image generation using Stable Diffusion.
 
@@ -70,6 +75,7 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
         scheduler: KarrasDiffusionSchedulers,
         safety_checker: StableDiffusionSafetyChecker,
         feature_extractor: CLIPImageProcessor,
+        image_encoder: CLIPVisionModelWithProjection = None,
         requires_safety_checker: bool = True,
     ):
         super().__init__(
@@ -80,6 +86,7 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
             scheduler,
             safety_checker,
             feature_extractor,
+            image_encoder,
             requires_safety_checker,
         )
         self.register_modules(
@@ -90,6 +97,7 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
             scheduler=scheduler,
             safety_checker=safety_checker,
             feature_extractor=feature_extractor,
+            image_encoder=image_encoder,
         )
 
     @torch.no_grad()
@@ -110,17 +118,40 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
         rp_args: Dict[str, str] = None,
     ):
         active = KBRK in prompt[0] if isinstance(prompt, list) else KBRK in prompt
+        use_base = KBASE in prompt[0] if isinstance(prompt, list) else KBASE in prompt
         if negative_prompt is None:
             negative_prompt = "" if isinstance(prompt, str) else [""] * len(prompt)
 
         device = self._execution_device
         regions = 0
 
+        self.base_ratio = float(rp_args["base_ratio"]) if "base_ratio" in rp_args else 0.0
         self.power = int(rp_args["power"]) if "power" in rp_args else 1
 
         prompts = prompt if isinstance(prompt, list) else [prompt]
-        n_prompts = negative_prompt if isinstance(prompt, str) else [negative_prompt]
+        n_prompts = negative_prompt if isinstance(prompt, list) else [negative_prompt]
         self.batch = batch = num_images_per_prompt * len(prompts)
+
+        if use_base:
+            bases = prompts.copy()
+            n_bases = n_prompts.copy()
+
+            for i, prompt in enumerate(prompts):
+                parts = prompt.split(KBASE)
+                if len(parts) == 2:
+                    bases[i], prompts[i] = parts
+                elif len(parts) > 2:
+                    raise ValueError(f"Multiple instances of {KBASE} found in prompt: {prompt}")
+            for i, prompt in enumerate(n_prompts):
+                n_parts = prompt.split(KBASE)
+                if len(n_parts) == 2:
+                    n_bases[i], n_prompts[i] = n_parts
+                elif len(n_parts) > 2:
+                    raise ValueError(f"Multiple instances of {KBASE} found in negative prompt: {prompt}")
+
+            all_bases_cn, _ = promptsmaker(bases, num_images_per_prompt)
+            all_n_bases_cn, _ = promptsmaker(n_bases, num_images_per_prompt)
+
         all_prompts_cn, all_prompts_p = promptsmaker(prompts, num_images_per_prompt)
         all_n_prompts_cn, _ = promptsmaker(n_prompts, num_images_per_prompt)
 
@@ -137,8 +168,16 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
 
             conds = getcompelembs(all_prompts_cn)
             unconds = getcompelembs(all_n_prompts_cn)
-            embs = getcompelembs(prompts)
-            n_embs = getcompelembs(n_prompts)
+            base_embs = getcompelembs(all_bases_cn) if use_base else None
+            base_n_embs = getcompelembs(all_n_bases_cn) if use_base else None
+            # When using base, it seems more reasonable to use base prompts as prompt_embeddings rather than regional prompts
+            embs = getcompelembs(prompts) if not use_base else base_embs
+            n_embs = getcompelembs(n_prompts) if not use_base else base_n_embs
+
+            if use_base and self.base_ratio > 0:
+                conds = self.base_ratio * base_embs + (1 - self.base_ratio) * conds
+                unconds = self.base_ratio * base_n_embs + (1 - self.base_ratio) * unconds
+
             prompt = negative_prompt = None
         else:
             conds = self.encode_prompt(prompts, device, 1, True)[0]
@@ -147,6 +186,18 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
                 if equal
                 else self.encode_prompt(all_n_prompts_cn, device, 1, True)[0]
             )
+
+            if use_base and self.base_ratio > 0:
+                base_embs = self.encode_prompt(bases, device, 1, True)[0]
+                base_n_embs = (
+                    self.encode_prompt(n_bases, device, 1, True)[0]
+                    if equal
+                    else self.encode_prompt(all_n_bases_cn, device, 1, True)[0]
+                )
+
+                conds = self.base_ratio * base_embs + (1 - self.base_ratio) * conds
+                unconds = self.base_ratio * base_n_embs + (1 - self.base_ratio) * unconds
+
             embs = n_embs = None
 
         if not active:
@@ -225,8 +276,6 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
 
             residual = hidden_states
 
-            args = () if USE_PEFT_BACKEND else (scale,)
-
             if attn.spatial_norm is not None:
                 hidden_states = attn.spatial_norm(hidden_states, temb)
 
@@ -247,16 +296,15 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
            if attn.group_norm is not None:
                 hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
 
-            args = () if USE_PEFT_BACKEND else (scale,)
-            query = attn.to_q(hidden_states, *args)
+            query = attn.to_q(hidden_states)
 
            if encoder_hidden_states is None:
                 encoder_hidden_states = hidden_states
            elif attn.norm_cross:
                 encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
 
-            key = attn.to_k(encoder_hidden_states, *args)
-            value = attn.to_v(encoder_hidden_states, *args)
+            key = attn.to_k(encoder_hidden_states)
+            value = attn.to_v(encoder_hidden_states)
 
            inner_dim = key.shape[-1]
            head_dim = inner_dim // attn.heads
@@ -283,7 +331,7 @@ class RegionalPromptingStableDiffusionPipeline(StableDiffusionPipeline):
            hidden_states = hidden_states.to(query.dtype)
 
            # linear proj
-            hidden_states = attn.to_out[0](hidden_states, *args)
+            hidden_states = attn.to_out[0](hidden_states)
            # dropout
            hidden_states = attn.to_out[1](hidden_states)
 
@@ -410,9 +458,9 @@ def promptsmaker(prompts, batch):
         add = ""
         if KCOMM in prompt:
             add, prompt = prompt.split(KCOMM)
-            add = add + " "
-        prompts = prompt.split(KBRK)
-        out_p.append([add + p for p in prompts])
+            add = add.strip() + " "
+        prompts = [p.strip() for p in prompt.split(KBRK)]
+        out_p.append([add + p for i, p in enumerate(prompts)])
     out = [None] * batch * len(out_p[0]) * len(out_p)
     for p, prs in enumerate(out_p):  # inputs prompts
         for r, pr in enumerate(prs):  # prompts for regions
@@ -449,7 +497,6 @@ def make_cells(ratios):
         add = []
         startend(add, inratios[1:])
         icells.append(add)
-
     return ocells, icells, sum(len(cell) for cell in icells)
 
 
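The heart of the new `base_ratio` option is a linear interpolation between the base-prompt embeddings and the regional-prompt embeddings, applied identically on both the `compel` path and the plain `encode_prompt` path: `conds = base_ratio * base_embs + (1 - base_ratio) * conds`. A minimal, self-contained sketch of that blend (dummy tensor shapes, not the pipeline's actual encoder output):

```python
import torch


def blend_with_base(conds: torch.Tensor, base_embs: torch.Tensor, base_ratio: float) -> torch.Tensor:
    # Same arithmetic as the diff: base_ratio * base + (1 - base_ratio) * regional.
    return base_ratio * base_embs + (1 - base_ratio) * conds


# Dummy CLIP-style embeddings: (batch, sequence_length, hidden_dim).
conds = torch.randn(2, 77, 768)      # regional-prompt embeddings
base_embs = torch.randn(2, 77, 768)  # base-prompt embeddings

blended = blend_with_base(conds, base_embs, base_ratio=0.2)
assert torch.allclose(blended, 0.2 * base_embs + 0.8 * conds)
```

With `base_ratio = 0.0` the regional embeddings pass through unchanged, which is why the blend is gated behind `use_base and self.base_ratio > 0` in the commit.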