Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -427,6 +427,11 @@ def generate_30(
     expanded_1 = expanded[0]
     expanded_2 = expanded[1]
     new_prompt = prompt+' '+expanded_1
+    print("-- ------------ --")
+    print("-- FINAL PROMPT --")
+    print(new_prompt)
+    print("-- FINAL PROMPT --")
+    print("-- ------------ --")
     global model
     global txt_tokenizer
     del model
@@ -569,18 +574,15 @@ def generate_60(
     del processor5
     gc.collect()
     torch.cuda.empty_cache()
-    expanded = expand_prompt(
+    expanded = expand_prompt(caption)
     expanded_1 = expanded[0]
     expanded_2 = expanded[1]
-
-
-
-
-
-
-    prompt = flatten_and_stringify(prompt+expanded_1+expanded_2)
-    prompt = " ".join(prompt)
-
+    new_prompt = prompt+' '+expanded_1
+    print("-- ------------ --")
+    print("-- FINAL PROMPT --")
+    print(new_prompt)
+    print("-- FINAL PROMPT --")
+    print("-- ------------ --")
     global model
     global txt_tokenizer
     del model
@@ -723,18 +725,15 @@ def generate_90(
     del processor5
     gc.collect()
     torch.cuda.empty_cache()
-    expanded = expand_prompt(
+    expanded = expand_prompt(caption)
     expanded_1 = expanded[0]
     expanded_2 = expanded[1]
-
-
-
-
-
-
-    prompt = flatten_and_stringify(prompt+expanded_1+expanded_2)
-    prompt = " ".join(prompt)
-
+    new_prompt = prompt+' '+expanded_1
+    print("-- ------------ --")
+    print("-- FINAL PROMPT --")
+    print(new_prompt)
+    print("-- FINAL PROMPT --")
+    print("-- ------------ --")
     global model
     global txt_tokenizer
     del model
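
Taken together, the three hunks make generate_60 and generate_90 build their prompt the same way generate_30 already did (dropping the old flatten_and_stringify/" ".join assembly), and add a debug banner that prints the final prompt in all three functions. Below is a minimal, self-contained sketch of that shared pattern; expand_prompt here is a hypothetical stand-in, since the diff only shows that the real one takes a caption and returns an indexable pair, and only the new_prompt and print lines mirror the committed code.

# Minimal sketch of the pattern this commit applies in generate_30/60/90.
# NOTE: expand_prompt is a hypothetical placeholder; the Space's real
# implementation is not visible in this diff.
def expand_prompt(caption):
    # Stand-in: pretend to produce two expanded variants of the caption.
    return (caption + ", highly detailed", caption + ", cinematic lighting")

def build_final_prompt(prompt, caption):
    expanded = expand_prompt(caption)
    expanded_1 = expanded[0]
    expanded_2 = expanded[1]  # kept for parity with the diff; unused in this path
    new_prompt = prompt + ' ' + expanded_1
    # Debug banner added by this commit so the final prompt shows up in the logs.
    print("-- ------------ --")
    print("-- FINAL PROMPT --")
    print(new_prompt)
    print("-- FINAL PROMPT --")
    print("-- ------------ --")
    return new_prompt

if __name__ == "__main__":
    build_final_prompt("a photo of a cat", "a cat sitting on a red sofa")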