Spaces:
Runtime error
Runtime error
Commit
·
87f3904
1
Parent(s):
0d385d9
Add prints at point of failure
Browse files
app.py
CHANGED
@@ -95,43 +95,34 @@ pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
|
|
95 |
# pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16).to("cuda")
|
96 |
|
97 |
def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None):
|
98 |
-
print("1 <****************")
|
99 |
loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")
|
100 |
|
101 |
# separate token and the embeds
|
102 |
trained_token = list(loaded_learned_embeds.keys())[0]
|
103 |
-
print("2 <****************")
|
104 |
embeds = loaded_learned_embeds[trained_token]
|
105 |
-
print("3 <****************")
|
106 |
|
107 |
# cast to dtype of text_encoder
|
108 |
dtype = text_encoder.get_input_embeddings().weight.dtype
|
109 |
-
print("4 <****************")
|
110 |
|
111 |
# add the token in tokenizer
|
112 |
token = token if token is not None else trained_token
|
113 |
-
print("5 <****************")
|
114 |
num_added_tokens = tokenizer.add_tokens(token)
|
115 |
-
print("6 <****************")
|
116 |
i = 1
|
117 |
while(num_added_tokens == 0):
|
118 |
-
print(f"The tokenizer already contains the token {token}.")
|
119 |
token = f"{token[:-1]}-{i}>"
|
120 |
-
print(f"Attempting to add the token {token}.")
|
121 |
num_added_tokens = tokenizer.add_tokens(token)
|
122 |
-
print("7 <****************")
|
123 |
i+=1
|
124 |
-
print("8 <****************")
|
125 |
|
126 |
# resize the token embeddings
|
127 |
text_encoder.resize_token_embeddings(len(tokenizer))
|
128 |
-
print("9 <****************")
|
129 |
|
130 |
# get the id for the token and assign the embeds
|
131 |
token_id = tokenizer.convert_tokens_to_ids(token)
|
132 |
-
print("
|
133 |
-
text_encoder
|
134 |
-
print("
|
|
|
|
|
135 |
return token
|
136 |
|
137 |
|
|
|
95 |
# pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16).to("cuda")
|
96 |
|
97 |
def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None):
    """Load a learned textual-inversion embedding into a CLIP text encoder.

    Parameters
    ----------
    learned_embeds_path : str
        Path to a ``torch.save``-ed single-entry dict mapping the trained
        placeholder token to its embedding tensor.
    text_encoder :
        Text encoder exposing ``get_input_embeddings()`` and
        ``resize_token_embeddings()`` (e.g. a CLIP text model) whose
        embedding table receives the new vector.
    tokenizer :
        Tokenizer exposing ``add_tokens()``, ``convert_tokens_to_ids()``
        and ``__len__`` that the placeholder token is registered with.
    token : str, optional
        Placeholder token to register; defaults to the token stored in the
        embeddings file.

    Returns
    -------
    str
        The token actually registered — may gain a ``-1``, ``-2``, ... suffix
        if the requested token already existed in the tokenizer.
    """
    loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")

    # The saved file is a single-entry dict: {trained_token: embedding}.
    trained_token = list(loaded_learned_embeds.keys())[0]
    embeds = loaded_learned_embeds[trained_token]

    # Cast to the dtype of the encoder's embedding table. Without this cast
    # the row assignment below fails when the encoder runs in fp16 while the
    # saved embedding is fp32 (the original "POINT OF FAILURE").
    dtype = text_encoder.get_input_embeddings().weight.dtype
    embeds = embeds.to(dtype)

    # Register the token; if it is already present, retry with a "-<i>"
    # suffix (assumes the placeholder is spelled like "<something>").
    token = token if token is not None else trained_token
    num_added_tokens = tokenizer.add_tokens(token)
    i = 1
    while num_added_tokens == 0:
        token = f"{token[:-1]}-{i}>"
        num_added_tokens = tokenizer.add_tokens(token)
        i += 1

    # Grow the embedding table to cover the newly added token.
    text_encoder.resize_token_embeddings(len(tokenizer))

    # Write the learned vector into the new token's row.
    token_id = tokenizer.convert_tokens_to_ids(token)
    text_encoder.get_input_embeddings().weight.data[token_id] = embeds
    return token
|
127 |
|
128 |
|