Nupur Kumari committed
Commit db5f8d8 · 1 Parent(s): 084b2e4

negative prompt
app.py CHANGED
@@ -73,7 +73,7 @@ def encode_target_images(images, pipeline):
 
 
 @spaces.GPU(duration=120)
-def generate_image(text, img1, img2, img3, guidance_scale, inference_steps, seed, rigid_object, enable_cpu_offload=False):
+def generate_image(text, img1, img2, img3, guidance_scale, inference_steps, seed, enable_cpu_offload=False, neg_prompt="", true_cfg=1.0, image_cfg=0.0):
     if enable_cpu_offload:
         pipeline.enable_sequential_cpu_offload()
     input_images = [img1, img2, img3]
@@ -104,6 +104,9 @@ def generate_image(text, img1, img2, img3, guidance_scale, inference_steps, seed
         generator = torch.Generator(device="cpu").manual_seed(seed),
         joint_attention_kwargs={'shared_attn': True, 'num': numref},
         return_dict=False,
+        negative_prompt=neg_prompt,
+        true_cfg_scale=true_cfg,
+        image_cfg_scale=image_cfg,
     )[0][0]
     output = rearrange(output, "b c h (n w) -> (b n) c h w", n=numref)[::numref]
     img = Image.fromarray( (( torch.clip(output[0].float(), -1., 1.).permute(1,2,0).cpu().numpy()*0.5+0.5)*255).astype(np.uint8) )
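For reference, here is a minimal self-contained sketch (illustrative only, not part of the commit) of the "true CFG" rule that the new `neg_prompt` and `true_cfg` arguments feed into; the same formula appears in pipelines/flux_pipeline/pipeline.py below. It also shows why the example rows below default to `true_cfg = 1.0`: at that scale the negative prompt has no effect.

```python
# Self-contained sketch of the "true CFG" combination these arguments feed into
# (same formula as in pipelines/flux_pipeline/pipeline.py in this commit).
import torch

def true_cfg_combine(noise_pred, neg_noise_pred, true_cfg_scale):
    # Extrapolate from the negative-prompt prediction toward the positive one.
    return neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)

pos = torch.randn(1, 64, 16)   # positive-prompt noise prediction (toy shape)
neg = torch.randn(1, 64, 16)   # negative-prompt noise prediction (toy shape)
# With true_cfg_scale == 1.0 (the example default) the negative prompt is a no-op.
assert torch.allclose(true_cfg_combine(pos, neg, 1.0), pos)
# With true_cfg_scale > 1.0 the prediction is pushed away from the negative prompt.
print(true_cfg_combine(pos, neg, 1.5).shape)  # torch.Size([1, 64, 16])
```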
@@ -120,7 +123,10 @@ def get_example():
             "./imgs/test_cases/action_figure/2.jpg",
             3.5,
             42,
-            True,
+            False,
+            "",
+            1.0,
+            0.0,
         ],
         [
             "A penguin plushie wearing pink sunglasses is lounging on a beach. Realistic shot.",
@@ -129,7 +135,10 @@ def get_example():
             "./imgs/test_cases/penguin/2.jpg",
             3.5,
             42,
-            True,
+            False,
+            "",
+            1.0,
+            0.0,
         ],
         [
             "A toy on a beach. Waves in the background. Realistic shot.",
@@ -138,16 +147,19 @@ def get_example():
             "./imgs/test_cases/rc_car/04.jpg",
             3.5,
             42,
-            True,
+            False,
+            "",
+            1.0,
+            0.0,
         ],
     ]
     return case
 
-def run_for_examples(text, img1, img2, img3, guidance_scale, seed, rigid_object, enable_cpu_offload=False):
+def run_for_examples(text, img1, img2, img3, guidance_scale, seed, enable_cpu_offload=False, neg_prompt="", true_cfg=1.0, image_cfg=0.0):
     inference_steps = 30
 
     return generate_image(
-        text, img1, img2, img3, guidance_scale, inference_steps, seed, rigid_object, enable_cpu_offload
+        text, img1, img2, img3, guidance_scale, inference_steps, seed, enable_cpu_offload, neg_prompt, true_cfg, image_cfg
     )
 
 description = """
@@ -210,13 +222,17 @@ with gr.Blocks() as demo:
                     label="Seed", minimum=0, maximum=2147483647, value=42, step=1
                 )
 
-                rigid_object = gr.Checkbox(
-                    label="rigid_object", info="Whether its a rigid object or a deformable object like pet animals, wearable etc.", value=True,
-                )
                 enable_cpu_offload = gr.Checkbox(
                     label="Enable CPU Offload", info="Enable CPU Offload to avoid memory issues", value=False,
                 )
 
+                with gr.Accordion("Advanced Options (True CFG, true_cfg_scale=1 means use fake CFG, >1 means use true CFG", open=False): # noqa E501
+                    neg_prompt = gr.Textbox(
+                        label="Negative Prompt",
+                        value="")
+                    true_cfg = gr.Slider(1.0, 10.0, 1.5, step=0.1, label="true CFG. Recommended to be 1.5")
+                    image_cfg = gr.Slider(0.0, 10.0, 0.0, step=0.1, label="image CFG scale, will increase the image alignment but longer run time and lower text alignment. Recommended to be 1.0")
+
                 # generate
                 generate_button = gr.Button("Generate Image")
 
@@ -236,8 +252,10 @@ with gr.Blocks() as demo:
                 guidance_scale_input,
                 num_inference_steps,
                 seed_input,
-                rigid_object,
                 enable_cpu_offload,
+                neg_prompt,
+                true_cfg,
+                image_cfg,
             ],
             outputs=output_image,
         )
@@ -252,7 +270,10 @@ with gr.Blocks() as demo:
                 image_input_3,
                 guidance_scale_input,
                 seed_input,
-                rigid_object,
+                enable_cpu_offload,
+                neg_prompt,
+                true_cfg,
+                image_cfg,
             ],
             outputs=output_image,
         )
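The new advanced-options block follows a standard Gradio pattern: an `gr.Accordion` wrapping a `Textbox` and two `Slider`s whose values are then listed among the click handler's inputs. Below is a minimal standalone sketch of that wiring; the labels mirror the diff, but the `echo` function and layout are hypothetical and only for illustration.

```python
# Minimal standalone sketch of the Accordion + Textbox + Slider pattern used above.
import gradio as gr

def echo(neg_prompt, true_cfg, image_cfg):
    # Stand-in for generate_image: just report the chosen advanced options.
    return f"neg_prompt={neg_prompt!r}, true_cfg={true_cfg}, image_cfg={image_cfg}"

with gr.Blocks() as demo:
    with gr.Accordion("Advanced Options", open=False):
        neg_prompt = gr.Textbox(label="Negative Prompt", value="")
        true_cfg = gr.Slider(1.0, 10.0, 1.5, step=0.1, label="True CFG scale")
        image_cfg = gr.Slider(0.0, 10.0, 0.0, step=0.1, label="Image CFG scale")
    out = gr.Textbox(label="Selected values")
    gr.Button("Show").click(echo, inputs=[neg_prompt, true_cfg, image_cfg], outputs=out)

if __name__ == "__main__":
    demo.launch()
```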
 
pipelines/flux_pipeline/pipeline.py CHANGED
@@ -98,6 +98,19 @@ def retrieve_timesteps(
     return timesteps, num_inference_steps
 
 
+def normalized_guidance_image(neg_noise_pred, noise_pred, image_noise_pred, true_cfg_scale, image_cfg_scale):
+    diff_img = image_noise_pred - neg_noise_pred
+    diff_txt = noise_pred - image_noise_pred
+
+    diff_norm_txt = diff_txt.norm(p=2, dim=[-1, -2], keepdim=True)
+    diff_norm_img = diff_img.norm(p=2, dim=[-1, -2], keepdim=True)
+    min_norm = torch.minimum(diff_norm_img, diff_norm_txt)
+    diff_txt = diff_txt * torch.minimum(torch.ones_like(diff_txt), min_norm / diff_norm_txt)
+    diff_img = diff_img * torch.minimum(torch.ones_like(diff_txt), min_norm / diff_norm_img)
+    pred_guided = image_noise_pred + image_cfg_scale * diff_img + true_cfg_scale * diff_txt
+    return pred_guided
+
+
 class SynCDFluxPipeline(FluxPipeline):
 
     model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
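The helper added above performs a norm-matched combination of text and image guidance: the image-guidance direction (image minus negative prediction) and the text-guidance direction (positive minus image prediction) are each rescaled so that neither exceeds the smaller of their two L2 norms, then added with their respective scales. A small standalone sketch, with toy tensor shapes and the helper reproduced locally for illustration:

```python
# Standalone sketch of the norm-matched guidance above (toy shapes, not the
# pipeline's real packed latents).
import torch

def normalized_guidance_image(neg_noise_pred, noise_pred, image_noise_pred,
                              true_cfg_scale, image_cfg_scale):
    # Copy of the helper introduced in this commit, reproduced for illustration.
    diff_img = image_noise_pred - neg_noise_pred
    diff_txt = noise_pred - image_noise_pred
    diff_norm_txt = diff_txt.norm(p=2, dim=[-1, -2], keepdim=True)
    diff_norm_img = diff_img.norm(p=2, dim=[-1, -2], keepdim=True)
    min_norm = torch.minimum(diff_norm_img, diff_norm_txt)
    diff_txt = diff_txt * torch.minimum(torch.ones_like(diff_txt), min_norm / diff_norm_txt)
    diff_img = diff_img * torch.minimum(torch.ones_like(diff_txt), min_norm / diff_norm_img)
    return image_noise_pred + image_cfg_scale * diff_img + true_cfg_scale * diff_txt

if __name__ == "__main__":
    torch.manual_seed(0)
    neg, pos, img = (torch.randn(1, 64, 16) for _ in range(3))  # (batch, seq, channels)
    out = normalized_guidance_image(neg, pos, img, true_cfg_scale=1.5, image_cfg_scale=1.0)
    print(out.shape)  # torch.Size([1, 64, 16]); same shape as the noise predictions
```

With `image_cfg_scale == 0` the denoising loop below skips this helper entirely and falls back to the plain true-CFG formula.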
@@ -162,6 +175,7 @@ class SynCDFluxPipeline(FluxPipeline):
         latents_ref: Optional[torch.Tensor] = None,
         latents_mask: Optional[torch.Tensor] = None,
         return_latents: bool=False,
+        image_cfg_scale: float=0.0,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -389,7 +403,7 @@ class SynCDFluxPipeline(FluxPipeline):
                     return_dict=False,
                 )[0]
 
-                if do_true_cfg:
+                if do_true_cfg and i>=1:
                     neg_noise_pred = self.transformer(
                         hidden_states=latents,
                         timestep=timestep / 1000,
@@ -398,10 +412,28 @@ class SynCDFluxPipeline(FluxPipeline):
                         encoder_hidden_states=negative_prompt_embeds,
                         txt_ids=text_ids,
                         img_ids=latent_image_ids,
-                        joint_attention_kwargs=self.joint_attention_kwargs,
+                        joint_attention_kwargs={**self.joint_attention_kwargs, 'neg_mode': True},
                         return_dict=False,
                     )[0]
-                    noise_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)
+
+                    if image_cfg_scale > 0:
+                        image_noise_pred = self.transformer(
+                            hidden_states=latents,
+                            timestep=timestep / 1000,
+                            guidance=guidance,
+                            pooled_projections=negative_pooled_prompt_embeds,
+                            encoder_hidden_states=negative_prompt_embeds,
+                            txt_ids=text_ids,
+                            img_ids=latent_image_ids,
+                            joint_attention_kwargs=self.joint_attention_kwargs,
+                            return_dict=False,
+                        )[0]
+
+                    if image_cfg_scale == 0:
+                        noise_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred)
+                    else:
+                        noise_pred = normalized_guidance_image(neg_noise_pred, noise_pred, image_noise_pred, true_cfg_scale, image_cfg_scale)
+
 
                 # compute the previous noisy sample x_t -> x_t-1
                 latents_dtype = latents.dtype
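Taken together, each denoising step now has three possible outcomes: the positive prediction alone (step 0, or CFG disabled), plain true CFG against the negative prompt, or the norm-matched text-plus-image combination. A pure-Python schematic of that branching; the three noise predictions and the combiner are passed in as stand-ins, so this is not the pipeline code itself:

```python
def guided_noise_pred(i, do_true_cfg, true_cfg_scale, image_cfg_scale,
                      pos_pred, neg_pred, img_pred, norm_matched_combine):
    """Schematic of the per-step guidance branching introduced in this commit."""
    if not (do_true_cfg and i >= 1):
        # Step 0, or true CFG disabled: keep the positive prediction unchanged.
        return pos_pred
    if image_cfg_scale == 0:
        # Plain true CFG against the negative-prompt prediction.
        return neg_pred + true_cfg_scale * (pos_pred - neg_pred)
    # Text + image guidance combined by the norm-matched helper defined above.
    return norm_matched_combine(neg_pred, pos_pred, img_pred, true_cfg_scale, image_cfg_scale)

# Scalar stand-ins: at step 0 the guidance branch is skipped entirely.
assert guided_noise_pred(0, True, 1.5, 0.0, 2.0, 1.0, 0.0, None) == 2.0
# From step 1 on, true CFG extrapolates away from the negative prediction.
assert guided_noise_pred(5, True, 1.5, 0.0, 2.0, 1.0, 0.0, None) == 1.0 + 1.5 * (2.0 - 1.0)
```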
 
pipelines/flux_pipeline/transformer.py CHANGED
@@ -79,6 +79,7 @@ class FluxAttnProcessor2_0:
         scale: float = 1.0,
         timestep: float = 0,
         val: bool = False,
+        neg_mode: bool = False,
     ) -> torch.FloatTensor:
         if mode == 'w': # and single:
             ref_dict[self.name] = hidden_states.detach()
@@ -133,7 +134,19 @@ class FluxAttnProcessor2_0:
             query = apply_rotary_emb(query, image_rotary_emb)
             key = apply_rotary_emb(key, image_rotary_emb)
 
-        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False, attn_mask=mask if timestep < 1. else None)
+        if neg_mode:
+            res = int(math.sqrt((end_of_hidden_states-(text_seq if encoder_hidden_states is None else 0)) // num))
+            hw = res*res
+            mask_ = torch.ones(1, res, num*res, res, num*res).to(query.device)
+            for i in range(num):
+                mask_[:, :, i*res:(i+1)*res, :, i*res:(i+1)*res] = 1
+            mask_ = rearrange(mask_, "b h w h1 w1 -> b (h w) (h1 w1)")
+            mask = torch.ones(1, num*hw + 512, num*hw + 512, device=query.device, dtype=query.dtype)
+            mask[:, 512:, 512:] = mask_
+            mask = mask.bool()
+            mask = rearrange(mask.unsqueeze(0).expand(attn.heads, -1, -1, -1), "nh b ... -> b nh ...")
+
+        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False, attn_mask=mask)
         hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
 
         hidden_states = hidden_states.to(query.dtype)
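The `neg_mode` branch above builds an attention mask over `num` image grids of `res x res` tokens plus a 512-token text prefix, then broadcasts it over attention heads into the (batch, heads, L, L) layout accepted by `F.scaled_dot_product_attention`. A toy-sized sketch of that bookkeeping; the sizes are illustrative and only the shape handling mirrors the processor code:

```python
# Toy-sized sketch of the neg_mode mask bookkeeping above.
import torch
from einops import rearrange

num, res, heads, text_len = 3, 4, 2, 512   # toy values; the real code derives res
hw = res * res

# (b, h, w, h1, w1) grid of query/key image positions; the per-image diagonal
# blocks along the width axis are written explicitly, mirroring the loop above.
mask_ = torch.ones(1, res, num * res, res, num * res)
for i in range(num):
    mask_[:, :, i * res:(i + 1) * res, :, i * res:(i + 1) * res] = 1
mask_ = rearrange(mask_, "b h w h1 w1 -> b (h w) (h1 w1)")   # (1, num*hw, num*hw)

# Prepend the text tokens, cast to bool, and expand over attention heads.
mask = torch.ones(1, num * hw + text_len, num * hw + text_len)
mask[:, text_len:, text_len:] = mask_
mask = mask.bool()
mask = rearrange(mask.unsqueeze(0).expand(heads, -1, -1, -1), "nh b ... -> b nh ...")
print(mask.shape)  # torch.Size([1, 2, 560, 560]) for these toy sizes
```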
 
  hidden_states = hidden_states.to(query.dtype)