MohamedRashad committed
Commit 8e360af · Parent: 8e9da2c

Remove device initialization and logging from load_transformer function; set device parameter to None for flexibility

Files changed (1): app.py (+1, -3)
app.py CHANGED
@@ -310,8 +310,6 @@ def load_visual_tokenizer(args):
     return vae
 
 def load_transformer(vae, args):
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    print(f"Device: {device}")
     model_path = args.model_path
     if args.checkpoint_type == 'torch':
         # copy large model to local; save slim to local; and copy slim to nas; load local slim model
@@ -368,7 +366,7 @@ def load_transformer(vae, args):
         model_path=slim_model_path,
         scale_schedule=None,
         vae=vae,
-        device=device,
+        device=None,
         model_kwargs=kwargs_model,
         text_channels=args.text_channels,
         apply_spatial_patchify=args.apply_spatial_patchify,
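
After this change, load_transformer no longer picks a device itself; it passes device=None and leaves device selection to the downstream model loader. A minimal sketch of how a loader might resolve device=None follows; the helper name resolve_device and its exact fallback behavior are assumptions for illustration, not code from this repo.

    # Sketch (assumption): how a loader could fall back when device=None.
    import torch

    def resolve_device(device=None):
        # Hypothetical helper, not from this repo: with device=None,
        # prefer CUDA when available, otherwise use the CPU. This mirrors
        # the logic the commit deletes from load_transformer.
        if device is None:
            return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        return torch.device(device)

    print(resolve_device(None))  # e.g. device(type='cuda') on a GPU machine

Moving the fallback behind the parameter's default means callers that do care about placement can still pass an explicit device, while the common case needs no setup or logging at the call site.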