1inkusFace committed
Commit 01e2f35 · verified · 1 Parent(s): 3295a48

Update pipeline_stable_diffusion_3_ipa.py

Files changed (1):
  pipeline_stable_diffusion_3_ipa.py (+4 -10)
pipeline_stable_diffusion_3_ipa.py CHANGED

@@ -864,18 +864,12 @@ class StableDiffusion3Pipeline(DiffusionPipeline, SD3LoraLoaderMixin, FromSingle
 
     @torch.inference_mode()
     def init_ipadapter(self, ip_adapter_path, image_encoder_path, nb_token, output_dim=2432):
-
+        from transformers import SiglipVisionModel, SiglipImageProcessor
         state_dict = torch.load(ip_adapter_path, map_location="cpu")
+
         device, dtype = self.transformer.device, self.transformer.dtype
-        if image_encoder_path=='laion/CLIP-ViT-H-14-laion2B-s32B-b79K':
-            from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor
-            image_encoder = CLIPVisionModelWithProjection.from_pretrained(image_encoder_path)
-            image_processor = CLIPImageProcessor.from_pretrained(image_encoder_path)
-        else:
-            from transformers import SiglipVisionModel, SiglipImageProcessor
-            image_encoder = SiglipVisionModel.from_pretrained(image_encoder_path)
-            image_processor = SiglipImageProcessor.from_pretrained(image_encoder_path)
-
+        image_encoder = SiglipVisionModel.from_pretrained(image_encoder_path)
+        image_processor = SiglipImageProcessor.from_pretrained(image_encoder_path)
         image_encoder.eval()
         image_encoder.to(device, dtype=dtype)
         self.image_encoder = image_encoder
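
With the CLIP-ViT-H branch gone, init_ipadapter now loads a SigLIP vision encoder unconditionally, so image_encoder_path must point at a SigLIP checkpoint. A minimal usage sketch under stated assumptions: the base-model id, the local ip-adapter.bin path, the SigLIP repo google/siglip-so400m-patch14-384, and nb_token=64 are all illustrative, not taken from this commit.

import torch
from pipeline_stable_diffusion_3_ipa import StableDiffusion3Pipeline

# Hypothetical setup: the model id and weight paths below are illustrative only.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",  # assumed SD3 base model
    torch_dtype=torch.bfloat16,
).to("cuda")

# After this commit, image_encoder_path must be a SigLIP repo; the old
# CLIP-ViT-H-14 fallback no longer exists.
pipe.init_ipadapter(
    ip_adapter_path="ip-adapter.bin",                       # hypothetical local weights
    image_encoder_path="google/siglip-so400m-patch14-384",  # SigLIP checkpoint
    nb_token=64,                                            # illustrative value
)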