Update app.py
app.py CHANGED
@@ -296,12 +296,15 @@ def expand_prompt(prompt):
     outputs = model.generate(
         input_ids=input_ids,
         attention_mask=attention_mask,
-        max_new_tokens=
+        max_new_tokens=256,
         temperature=0.2,
         top_p=0.9,
         do_sample=True,
     )
     enhanced_prompt = txt_tokenizer.decode(outputs[0], skip_special_tokens=True)
+    print('-- generated prompt 1 --')
+    print(enhanced_prompt)
+
     enhanced_prompt = filter_text(enhanced_prompt,prompt)
     enhanced_prompt = filter_text(enhanced_prompt,user_prompt_rewrite)
     enhanced_prompt = filter_text(enhanced_prompt,system_prompt_rewrite)
@@ -314,13 +317,15 @@ def expand_prompt(prompt):
     outputs_2 = model.generate(
         input_ids=input_ids_2,
         attention_mask=attention_mask_2,
-        max_new_tokens=
+        max_new_tokens=256,
         temperature=0.2,
         top_p=0.9,
         do_sample=True,
     )
     # Use the encoded tensor 'text_inputs' here
     enhanced_prompt_2 = txt_tokenizer.decode(outputs_2[0], skip_special_tokens=True)
+    print('-- generated prompt 2 --')
+    print(enhanced_prompt_2)
     enhanced_prompt_2 = filter_text(enhanced_prompt_2,prompt)
     enhanced_prompt_2 = filter_text(enhanced_prompt_2,user_prompt_rewrite_2)
     enhanced_prompt_2 = filter_text(enhanced_prompt_2,system_prompt_rewrite)
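
For context, a minimal, self-contained sketch of how the fixed call is typically wired up. Only the generate() arguments and the decode/filter lines appear in the diff above; the tokenization step, the torch import, and the wrapper function below are assumptions based on standard Transformers usage, not code taken from app.py.

import torch

def expand_prompt_sketch(prompt):
    # Assumed: txt_tokenizer and model are a Hugging Face tokenizer and causal LM
    # loaded elsewhere in app.py.
    inputs = txt_tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_new_tokens=256,  # must be given an integer; the dangling `max_new_tokens=`
                                 # in the old code could not be parsed as a keyword argument,
                                 # so the Space failed at startup
            temperature=0.2,
            top_p=0.9,
            do_sample=True,
        )
    return txt_tokenizer.decode(outputs[0], skip_special_tokens=True)

With do_sample=True, the temperature and top_p settings take effect during sampling; setting max_new_tokens explicitly also avoids falling back to the model's default generation length.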