Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -442,10 +442,10 @@ def generate_30(
     print("-- FINAL PROMPT --")
     print("-- ------------ --")

-    global model
-    global txt_tokenizer
-    del model
-    del txt_tokenizer
+    #global model
+    #global txt_tokenizer
+    #del model
+    #del txt_tokenizer
     gc.collect()
     torch.cuda.empty_cache()
     global text_encoder_1

@@ -593,10 +593,10 @@ def generate_60(
     print(new_prompt)
     print("-- FINAL PROMPT --")
     print("-- ------------ --")
-    global model
-    global txt_tokenizer
-    del model
-    del txt_tokenizer
+    #global model
+    #global txt_tokenizer
+    #del model
+    #del txt_tokenizer
     gc.collect()
     torch.cuda.empty_cache()
     global text_encoder_1

@@ -745,10 +745,10 @@ def generate_90(
     print(new_prompt)
     print("-- FINAL PROMPT --")
     print("-- ------------ --")
-    global model
-    global txt_tokenizer
-    del model
-    del txt_tokenizer
+    #global model
+    #global txt_tokenizer
+    #del model
+    #del txt_tokenizer
     gc.collect()
     torch.cuda.empty_cache()
     global text_encoder_1
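In all three functions the explicit teardown of the globally held model and tokenizer is commented out, while the cache cleanup after it is kept. For context, the removed lines followed a common pattern for freeing GPU memory before loading the next component: drop the Python references, then collect garbage and release the CUDA caching allocator's unused blocks. A minimal sketch of that pattern, assuming globals named model and txt_tokenizer as in the diff (the helper function itself is illustrative, not the Space's actual code):

import gc

import torch

def release_prompt_models():
    # Hypothetical helper mirroring the lines this commit comments out.
    global model, txt_tokenizer
    # Drop the last Python references so the objects become collectable.
    del model
    del txt_tokenizer
    # Collect the freed objects, then return the now-unused cached CUDA
    # blocks to the driver; empty_cache() cannot release memory that is
    # still referenced.
    gc.collect()
    torch.cuda.empty_cache()

After this commit only gc.collect() and torch.cuda.empty_cache() run, which frees cached but unreferenced memory while leaving model and txt_tokenizer resident. A plausible reading is that keeping them loaded avoids reloading the models on every call, and avoids a NameError if a later call reaches these lines after an earlier call has already deleted the globals.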
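Separately, the generate_30/generate_60/generate_90 triplet, together with the "Running on Zero" badge, is consistent with the ZeroGPU convention of exposing one entry point per requested GPU duration. A hedged sketch of that convention using the spaces package (signatures and bodies are placeholders; the decorators are outside this diff, so this is an assumption about the surrounding code):

import spaces  # Hugging Face ZeroGPU helper package

@spaces.GPU(duration=30)   # request the GPU for up to 30 s
def generate_30(prompt: str):
    ...  # generation body elided

@spaces.GPU(duration=60)
def generate_60(prompt: str):
    ...

@spaces.GPU(duration=90)
def generate_90(prompt: str):
    ...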