Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -305,6 +305,10 @@ def expand_prompt(prompt):
     )
     enhanced_prompt = txt_tokenizer.decode(outputs[0], skip_special_tokens=True)
     enhanced_prompt = filter_text(enhanced_prompt,prompt)
+    enhanced_prompt = filter_text(enhanced_prompt,user_prompt_rewrite)
+    enhanced_prompt = filter_text(enhanced_prompt,system_prompt_rewrite)
+    print('-- filtered prompt --')
+    print(enhanced_prompt)
     input_text_2 = f"{system_prompt_rewrite} {user_prompt_rewrite_2} {enhanced_prompt}"
     encoded_inputs_2 = txt_tokenizer(input_text_2, return_tensors="pt", return_attention_mask=True).to("cuda:0")
     input_ids_2 = encoded_inputs_2["input_ids"].to("cuda:0")
@@ -319,10 +323,9 @@ def expand_prompt(prompt):
     )
     # Use the encoded tensor 'text_inputs' here
     enhanced_prompt_2 = txt_tokenizer.decode(outputs_2[0], skip_special_tokens=True)
-    print('-- generated prompt --')
     enhanced_prompt_2 = filter_text(enhanced_prompt_2,prompt)
-
-
+    enhanced_prompt_2 = filter_text(enhanced_prompt_2,user_prompt_rewrite_2)
+    enhanced_prompt_2 = filter_text(enhanced_prompt_2,system_prompt_rewrite)
     print('-- filtered prompt 2 --')
     print(enhanced_prompt_2)
     enh_prompt=[enhanced_prompt,enhanced_prompt_2]
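Note: the added filter_text calls strip the rewrite instructions themselves (user_prompt_rewrite, system_prompt_rewrite) out of the decoded output, so the enhancement no longer carries that scaffolding text into the second rewrite pass. filter_text is defined elsewhere in app.py and its implementation is not part of this diff; the sketch below only illustrates, under that assumption, the kind of substring removal the chained calls rely on.

def _filter_text_sketch(generated: str, unwanted: str) -> str:
    # Hypothetical stand-in for app.py's filter_text (not shown in this diff):
    # drop every occurrence of `unwanted` from the decoded model output and
    # collapse the leftover whitespace.
    return " ".join(generated.replace(unwanted, "").split())

# Chained exactly as in the hunk above, each call removes one more piece of
# instruction scaffolding from the enhancement before it is reused:
#   enhanced_prompt = filter_text(enhanced_prompt, prompt)
#   enhanced_prompt = filter_text(enhanced_prompt, user_prompt_rewrite)
#   enhanced_prompt = filter_text(enhanced_prompt, system_prompt_rewrite)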
@@ -426,7 +429,7 @@ def generate_30(
     expanded = expand_prompt(caption)
     expanded_1 = expanded[0]
     expanded_2 = expanded[1]
-    new_prompt = prompt+' '+expanded_1
+    new_prompt = prompt+' '+expanded_1+' '+expanded_2
     print("-- ------------ --")
     print("-- FINAL PROMPT --")
     print(new_prompt)
@@ -577,7 +580,7 @@ def generate_60(
     expanded = expand_prompt(caption)
     expanded_1 = expanded[0]
     expanded_2 = expanded[1]
-    new_prompt = prompt+' '+expanded_1
+    new_prompt = prompt+' '+expanded_1+' '+expanded_2
     print("-- ------------ --")
     print("-- FINAL PROMPT --")
     print(new_prompt)
@@ -728,7 +731,7 @@ def generate_90(
     expanded = expand_prompt(caption)
     expanded_1 = expanded[0]
     expanded_2 = expanded[1]
-    new_prompt = prompt+' '+expanded_1
+    new_prompt = prompt+' '+expanded_1+' '+expanded_2
     print("-- ------------ --")
     print("-- FINAL PROMPT --")
     print(new_prompt)
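Note: all three generator entry points (generate_30, generate_60, generate_90) previously appended only the first expansion pass to the user's prompt; after this commit they append both. A quick illustration with made-up values, where only the concatenation pattern comes from the diff:

# Example values are hypothetical; only the join below mirrors the change.
prompt = "a lighthouse at dusk"               # user's original prompt
expanded_1 = "storm clouds, crashing waves"   # first rewrite pass
expanded_2 = "volumetric light, film grain"   # second rewrite pass

new_prompt = prompt + ' ' + expanded_1 + ' ' + expanded_2
print(new_prompt)
# -> a lighthouse at dusk storm clouds, crashing waves volumetric light, film grain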