Spaces:
Running
on
Zero
Commit
·
920cc4d
1
Parent(s):
8e360af
Remove cache_enabled parameter from autocast context in load_infinity function for cleaner implementation
Browse files
app.py
CHANGED
@@ -212,7 +212,7 @@ def load_infinity(
|
|
212 |
text_maxlen = 512
|
213 |
torch.cuda.empty_cache()
|
214 |
|
215 |
-
with torch.amp.autocast(device_type=device,
|
216 |
infinity_test: Infinity = Infinity(
|
217 |
vae_local=vae, text_channels=text_channels, text_maxlen=text_maxlen,
|
218 |
shared_aln=True, raw_scale_schedule=scale_schedule,
|
|
|
212 |
text_maxlen = 512
|
213 |
torch.cuda.empty_cache()
|
214 |
|
215 |
+
with torch.amp.autocast(device_type=device, dtype=autocast_dtype), torch.no_grad():
|
216 |
infinity_test: Infinity = Infinity(
|
217 |
vae_local=vae, text_channels=text_channels, text_maxlen=text_maxlen,
|
218 |
shared_aln=True, raw_scale_schedule=scale_schedule,
|