Update app.py
app.py
CHANGED
@@ -217,7 +217,7 @@ def generate_30(
     pooled_prompt_embeds_a = prompt_embeds_a[0] # Pooled output from encoder 1
     prompt_embeds_a = prompt_embeds_a.hidden_states[-2] # Penultimate hidden state from encoder 1

-    prompt_embeds_b = pipe.
+    prompt_embeds_b = pipe.text_encoder(text_input_ids2.to(torch.device('cuda')), output_hidden_states=True)
     pooled_prompt_embeds_b = prompt_embeds_b[0] # Pooled output from encoder 2
     prompt_embeds_b = prompt_embeds_b.hidden_states[-2] # Penultimate hidden state from encoder 2

@@ -299,7 +299,7 @@ def generate_60(
     pooled_prompt_embeds_a = prompt_embeds_a[0] # Pooled output from encoder 1
     prompt_embeds_a = prompt_embeds_a.hidden_states[-2] # Penultimate hidden state from encoder 1

-    prompt_embeds_b = pipe.
+    prompt_embeds_b = pipe.text_encoder(text_input_ids2.to(torch.device('cuda')), output_hidden_states=True)
     pooled_prompt_embeds_b = prompt_embeds_b[0] # Pooled output from encoder 2
     prompt_embeds_b = prompt_embeds_b.hidden_states[-2] # Penultimate hidden state from encoder 2

@@ -381,7 +381,7 @@ def generate_90(
     pooled_prompt_embeds_a = prompt_embeds_a[0] # Pooled output from encoder 1
     prompt_embeds_a = prompt_embeds_a.hidden_states[-2] # Penultimate hidden state from encoder 1

-    prompt_embeds_b = pipe.
+    prompt_embeds_b = pipe.text_encoder(text_input_ids2.to(torch.device('cuda')), output_hidden_states=True)
     pooled_prompt_embeds_b = prompt_embeds_b[0] # Pooled output from encoder 2
     prompt_embeds_b = prompt_embeds_b.hidden_states[-2] # Penultimate hidden state from encoder 2
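For context, the pattern these three functions appear to follow is SDXL's dual text-encoder prompt encoding: each encoder contributes its penultimate hidden state as a sequence embedding, and the pipeline keeps a pooled vector for the added conditioning. The sketch below is a hedged reconstruction, not the Space's actual code: the checkpoint id, the tokenizer calls, and the use of `pipe.text_encoder_2` for the second encoder are filled in from the standard diffusers SDXL pipeline, while `text_input_ids2` is the name that appears in the patch.

```python
import torch
from diffusers import StableDiffusionXLPipeline

# Hypothetical checkpoint; the Space's actual model is not shown in this diff.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

prompt = "a photo of an astronaut riding a horse"
device = torch.device("cuda")

# Each SDXL text encoder has its own tokenizer.
text_input_ids = pipe.tokenizer(
    prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length,
    truncation=True, return_tensors="pt",
).input_ids
text_input_ids2 = pipe.tokenizer_2(
    prompt, padding="max_length", max_length=pipe.tokenizer_2.model_max_length,
    truncation=True, return_tensors="pt",
).input_ids

# Encoder 1 (CLIP ViT-L): index [0] is the last hidden state here; the sequence
# embedding SDXL actually uses is the penultimate hidden state.
prompt_embeds_a = pipe.text_encoder(text_input_ids.to(device), output_hidden_states=True)
prompt_embeds_a = prompt_embeds_a.hidden_states[-2]

# Encoder 2 (OpenCLIP ViT-bigG): diffusers exposes it as pipe.text_encoder_2;
# its index [0] is the projected pooled output SDXL uses as pooled_prompt_embeds.
prompt_embeds_b = pipe.text_encoder_2(text_input_ids2.to(device), output_hidden_states=True)
pooled_prompt_embeds = prompt_embeds_b[0]
prompt_embeds_b = prompt_embeds_b.hidden_states[-2]

# SDXL concatenates the two sequence embeddings along the feature dimension
# (1, 77, 768) + (1, 77, 1280) -> (1, 77, 2048).
prompt_embeds = torch.cat([prompt_embeds_a, prompt_embeds_b], dim=-1)
```

One observation on the committed line: it calls `pipe.text_encoder` on `text_input_ids2`, so the same encoder is applied to both token sequences. If the intent was to encode the second prompt with SDXL's second encoder, `pipe.text_encoder_2` is the usual attribute in diffusers pipelines.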