Commit · baf25cb
1 Parent(s): 87f3904
Add more prints to function
app.py
CHANGED
@@ -97,6 +97,7 @@ pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
 def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None):
     loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")
 
+    _old_token = token
     # separate token and the embeds
     trained_token = list(loaded_learned_embeds.keys())[0]
     embeds = loaded_learned_embeds[trained_token]
@@ -108,10 +109,15 @@ def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, tok
     token = token if token is not None else trained_token
     num_added_tokens = tokenizer.add_tokens(token)
     i = 1
+    print("start while loop **************")
     while(num_added_tokens == 0):
         token = f"{token[:-1]}-{i}>"
         num_added_tokens = tokenizer.add_tokens(token)
+        print("i --> ", i)
+        print("token --> ", token)
+        print("num_added_tokens --> ", num_added_tokens)
         i+=1
+    print("end while loop **************")
 
     # resize the token embeddings
     text_encoder.resize_token_embeddings(len(tokenizer))
@@ -119,9 +125,18 @@ def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, tok
     # get the id for the token and assign the embeds
     token_id = tokenizer.convert_tokens_to_ids(token)
     print("&&&&&&&&&&&&&&&&")
+    print("learned_embeds_path --> ", learned_embeds_path)
+    print("text_encoder --> ", text_encoder)
+    print("tokenizer --> ", tokenizer)
+    print("_old_token --> ", _old_token)
+    print("token --> ", token)
+    print("trained_token --> ", trained_token)
+    print("dtype --> ", dtype)
+    print("num_added_tokens --> ", num_added_tokens)
     print("text_encoder --> ", text_encoder)
     print("token_id --> ", token_id)
     print("embeds --> ", embeds)
+    print("&&&&&&&&&&&&&&&&")
     text_encoder.get_input_embeddings().weight.data[token_id] = embeds # <------ POINT OF FAILURE
     return token
 
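The commit only adds prints around the line marked POINT OF FAILURE; it does not change the assignment itself. A common cause of a runtime error on exactly that line, when the pipeline is loaded in half precision, is a dtype mismatch between the float32 tensor stored in the learned-embeds file and the encoder's float16 embedding matrix. The sketch below shows a dtype-safe version of the assignment; the diagnosis and the `assign_learned_embed` helper are assumptions for illustration, not part of this commit. Note also that the new `print("dtype --> ", dtype)` line assumes a global `dtype` exists elsewhere in app.py; if it does not, that debug print will itself raise a NameError before the marked assignment runs.

```python
# Minimal sketch of a dtype-safe version of the failing line. Assumption,
# not confirmed by the commit: the pipeline was loaded with
# torch_dtype=torch.float16 while learned_embeds.bin stores float32 weights,
# so the raw write at token_id fails. assign_learned_embed is hypothetical.
import torch

def assign_learned_embed(text_encoder, token_id, embeds):
    """Write a learned embedding into the encoder's input embedding table."""
    emb_weight = text_encoder.get_input_embeddings().weight
    # Match the encoder's dtype and device before writing, so a float32
    # CPU tensor does not collide with a float16 CUDA embedding matrix.
    embeds = embeds.to(dtype=emb_weight.dtype, device=emb_weight.device)
    emb_weight.data[token_id] = embeds
```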
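For context, a hedged usage sketch of the function being debugged: `pipe` is the StableDiffusionPipeline created earlier in app.py (visible in the first hunk header), while the embedding path and placeholder token below are hypothetical stand-ins, not values taken from this Space. The while loop in the function handles token collisions by appending `-1`, `-2`, … before the closing `>`, which is why placeholder tokens are conventionally written in angle brackets.

```python
# Hypothetical usage (the path and placeholder token are illustrative only):
token = load_learned_embed_in_clip(
    "learned_embeds.bin",   # hypothetical path to a textual-inversion file
    pipe.text_encoder,
    pipe.tokenizer,
    token="<my-concept>",   # hypothetical placeholder token
)
image = pipe(f"a photo of {token}").images[0]
image.save("out.png")
```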