Update pipeline.py
pipeline.py CHANGED (+5 -5)
@@ -33,7 +33,7 @@ EXAMPLE_DOC_STRING = """
 class InstantCharacterFluxPipeline(FluxPipeline):
 
 
-    @torch.
+    @torch.no_grad()
     def encode_siglip_image_emb(self, siglip_image, device, dtype):
         siglip_image = siglip_image.to(device, dtype=dtype)
         res = self.siglip_image_encoder(siglip_image, output_hidden_states=True)
@@ -45,7 +45,7 @@ class InstantCharacterFluxPipeline(FluxPipeline):
         return siglip_image_embeds, siglip_image_shallow_embeds
 
 
-    @torch.
+    @torch.no_grad()
     def encode_dinov2_image_emb(self, dinov2_image, device, dtype):
         dinov2_image = dinov2_image.to(device, dtype=dtype)
         res = self.dino_image_encoder_2(dinov2_image, output_hidden_states=True)
@@ -57,7 +57,7 @@ class InstantCharacterFluxPipeline(FluxPipeline):
         return dinov2_image_embeds, dinov2_image_shallow_embeds
 
 
-    @torch.
+    @torch.no_grad()
     def encode_image_emb(self, siglip_image, device, dtype):
         object_image_pil = siglip_image
         object_image_pil_low_res = [object_image_pil.resize((384, 384))]
@@ -104,7 +104,7 @@ class InstantCharacterFluxPipeline(FluxPipeline):
         return image_embeds_dict
 
 
-    @torch.
+    @torch.no_grad()
     def init_ccp_and_attn_processor(self, *args, **kwargs):
         subject_ip_adapter_path = kwargs['subject_ip_adapter_path']
         nb_token = kwargs['nb_token']
@@ -150,7 +150,7 @@ class InstantCharacterFluxPipeline(FluxPipeline):
         self.subject_image_proj_model = image_proj_model
 
 
-    @torch.
+    @torch.no_grad()
     def init_adapter(
         self,
         image_encoder_path=None,
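
Each of the five hunks applies a @torch.no_grad() decorator to an image-encoding or setup method of InstantCharacterFluxPipeline. As a minimal sketch of what the decorator changes, the toy module below (hypothetical, not part of this pipeline) shows that calls made under @torch.no_grad() build no autograd graph, which saves memory for inference-only code paths such as these encoder forward passes.

import torch

class ToyEncoder(torch.nn.Module):
    """Hypothetical stand-in for the pipeline's image encoders."""
    def __init__(self):
        super().__init__()
        self.proj = torch.nn.Linear(4, 4)

    @torch.no_grad()
    def encode(self, x):
        # Autograd does not record this forward pass, so no computation
        # graph is kept in memory and the output cannot be backpropagated.
        return self.proj(x)

enc = ToyEncoder()
out = enc.encode(torch.randn(2, 4))
print(out.requires_grad)  # False: gradients were never tracked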