MohamedRashad committed
Commit 920cc4d (parent: 8e360af)

Remove the cache_enabled and enabled arguments from the autocast context in load_infinity for a cleaner implementation

Files changed (1): app.py (+1, -1)
app.py CHANGED
@@ -212,7 +212,7 @@ def load_infinity(
     text_maxlen = 512
     torch.cuda.empty_cache()
 
-    with torch.amp.autocast(device_type=device, enabled=bf16, dtype=autocast_dtype, cache_enabled=True), torch.no_grad():
+    with torch.amp.autocast(device_type=device, dtype=autocast_dtype), torch.no_grad():
         infinity_test: Infinity = Infinity(
             vae_local=vae, text_channels=text_channels, text_maxlen=text_maxlen,
             shared_aln=True, raw_scale_schedule=scale_schedule,
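
For context, a minimal sketch of the resulting context-manager usage, with device and autocast_dtype as stand-ins for the values computed earlier in load_infinity. Note that cache_enabled=True is already the effective default of torch.amp.autocast, so dropping it preserves behavior; dropping enabled=bf16, by contrast, leaves autocast unconditionally enabled rather than gated on the bf16 flag.

import torch

# Stand-in values; in load_infinity these come from the function's arguments.
device = "cuda" if torch.cuda.is_available() else "cpu"
autocast_dtype = torch.bfloat16

# Post-commit form: autocast is always on, and cache_enabled falls back to
# its default (the global autocast cache setting, True unless overridden).
with torch.amp.autocast(device_type=device, dtype=autocast_dtype), torch.no_grad():
    x = torch.ones(4, 4, device=device)
    y = x @ x  # matmul autocasts to bfloat16 on supported devices
    print(y.dtype)  # typically torch.bfloat16 under autocast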