ford442 committed on
Commit
6996882
·
verified ·
1 Parent(s): 7e95f51

Update ip_adapter/ip_adapter.py

Browse files
Files changed (1) hide show
  1. ip_adapter/ip_adapter.py +2 -3
ip_adapter/ip_adapter.py CHANGED
@@ -7,11 +7,9 @@ from diffusers.pipelines.controlnet import MultiControlNetModel
7
  from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
8
  from PIL import Image
9
 
10
- from .utils import is_torch2_available
11
  from .attention_processor import IPAttnProcessor2_0 as IPAttnProcessor, AttnProcessor2_0 as AttnProcessor, CNAttnProcessor2_0 as CNAttnProcessor
12
  from .resampler import Resampler
13
 
14
-
15
  class ImageProjModel(torch.nn.Module):
16
  """Projection Model"""
17
  def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4):
@@ -28,7 +26,6 @@ class ImageProjModel(torch.nn.Module):
28
  clip_extra_context_tokens = self.norm(clip_extra_context_tokens)
29
  return clip_extra_context_tokens
30
 
31
-
32
  class IPAdapter:
33
 
34
  def __init__(self, sd_pipe, image_encoder_path, ip_ckpt, device, num_tokens=4):
@@ -196,6 +193,7 @@ class IPAdapterXL(IPAdapter):
196
  num_samples=4,
197
  seed=-1,
198
  num_inference_steps=30,
 
199
  **kwargs,
200
  ):
201
  self.set_scale(scale)
@@ -243,6 +241,7 @@ class IPAdapterXL(IPAdapter):
243
  pooled_prompt_embeds=pooled_prompt_embeds,
244
  negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
245
  num_inference_steps=num_inference_steps,
 
246
  generator=generator,
247
  **kwargs,
248
  ).images
 
7
  from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
8
  from PIL import Image
9
 
 
10
  from .attention_processor import IPAttnProcessor2_0 as IPAttnProcessor, AttnProcessor2_0 as AttnProcessor, CNAttnProcessor2_0 as CNAttnProcessor
11
  from .resampler import Resampler
12
 
 
13
  class ImageProjModel(torch.nn.Module):
14
  """Projection Model"""
15
  def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4):
 
26
  clip_extra_context_tokens = self.norm(clip_extra_context_tokens)
27
  return clip_extra_context_tokens
28
 
 
29
  class IPAdapter:
30
 
31
  def __init__(self, sd_pipe, image_encoder_path, ip_ckpt, device, num_tokens=4):
 
193
  num_samples=4,
194
  seed=-1,
195
  num_inference_steps=30,
196
+ guidance_scale=7.5,
197
  **kwargs,
198
  ):
199
  self.set_scale(scale)
 
241
  pooled_prompt_embeds=pooled_prompt_embeds,
242
  negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
243
  num_inference_steps=num_inference_steps,
244
+ guidance_scale=guidance_scale,
245
  generator=generator,
246
  **kwargs,
247
  ).images