Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -461,8 +461,8 @@ def generate_30(
     del model5
     del processor5
     gc.collect()
-
-
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     #expanded = expand_prompt(captions)
     new_prompt = prompt + ' ' + captions
     print("-- ------------ --")
@@ -471,7 +471,7 @@ def generate_30(
     print("-- FINAL PROMPT --")
     print("-- ------------ --")
     gc.collect()
-
+    torch.cuda.empty_cache()
     global text_encoder_1
     global text_encoder_2
     pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
@@ -590,8 +590,8 @@ def generate_60(
     del model5
     del processor5
     gc.collect()
-
-
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     #expanded = expand_prompt(captions)
     new_prompt = prompt + ' ' + captions
     print("-- ------------ --")
@@ -600,7 +600,7 @@ def generate_60(
     print("-- FINAL PROMPT --")
     print("-- ------------ --")
     gc.collect()
-
+    torch.cuda.empty_cache()
     global text_encoder_1
     global text_encoder_2
     pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
@@ -719,8 +719,8 @@ def generate_90(
     del model5
     del processor5
     gc.collect()
-
-
+    torch.cuda.empty_cache()
+    torch.cuda.reset_peak_memory_stats()
     #expanded = expand_prompt(captions)
     new_prompt = prompt + ' ' + captions
     print("-- ------------ --")
@@ -729,7 +729,7 @@ def generate_90(
     print("-- FINAL PROMPT --")
     print("-- ------------ --")
     gc.collect()
-
+    torch.cuda.empty_cache()
     global text_encoder_1
     global text_encoder_2
     pipe.text_encoder=text_encoder_1.to(device=device, dtype=torch.bfloat16)
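The commit adds explicit CUDA memory cleanup in all three generators (generate_30, generate_60, generate_90) after the captioning model (model5/processor5) is torn down. A minimal sketch of the pattern, assuming a hypothetical load_captioner() in place of the Space's actual model setup:

    import gc
    import torch

    def caption_then_free(image):
        # Hypothetical loader; the real Space builds model5/processor5 elsewhere.
        model, processor = load_captioner()
        inputs = processor(images=image, return_tensors="pt").to("cuda")
        ids = model.generate(**inputs)
        caption = processor.batch_decode(ids, skip_special_tokens=True)[0]
        # The pattern the diff adds: drop the last Python references, collect
        # them, then hand the cached CUDA blocks back so the diffusion pipeline
        # loaded next sees the freed VRAM.
        del model
        del processor
        gc.collect()
        torch.cuda.empty_cache()
        # Restart peak tracking so a later torch.cuda.max_memory_allocated()
        # reading reflects only the generation stage, not the captioner.
        torch.cuda.reset_peak_memory_stats()
        return caption

Note that torch.cuda.empty_cache() can only return blocks that no live tensor still occupies, which is why the del statements and gc.collect() come first; reset_peak_memory_stats() frees nothing at all, it merely zeroes the bookkeeping behind the memory-stats queries.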