JiantaoLin committed
Commit e784881 · 1 Parent(s): 6a8a55e
Files changed (2)
  1. app.py +1 -1
  2. pipeline/kiss3d_wrapper.py +0 -1
app.py CHANGED
@@ -223,7 +223,7 @@ def bundle_image_to_mesh(
 ):
     global mesh_cache
     print(f"Before bundle_image_to_mesh: {torch.cuda.memory_allocated() / 1024**3} GB")
-
+    k3d_wrapper.recon_model.init_flexicubes_geometry("cuda:0", fovy=50.0)
     # TODO: delete this later
     k3d_wrapper.del_llm_model()
 
pipeline/kiss3d_wrapper.py CHANGED
@@ -142,7 +142,6 @@ def init_wrapper_from_config(config_path):
     state_dict = {k[14:]: v for k, v in state_dict.items() if k.startswith('lrm_generator.')}
     recon_model.load_state_dict(state_dict, strict=True)
     recon_model.to(recon_device)
-    recon_model.init_flexicubes_geometry(recon_device, fovy=50.0)
     recon_model.eval()
     logger.warning(f"GPU memory allocated after load reconstruction model on {recon_device}: {torch.cuda.memory_allocated(device=recon_device) / 1024**3} GB")
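
Taken together, the change moves the FlexiCubes geometry setup out of init_wrapper_from_config (model load time) and into the bundle_image_to_mesh handler, so the CUDA-dependent init_flexicubes_geometry call only runs once a GPU request is actually being served. Below is a minimal sketch of that deferred-initialization pattern; the ReconWrapper class and the _geometry_ready guard flag are hypothetical illustrations, not code from this repo (the commit itself simply re-issues the call inside the handler):

import torch

class ReconWrapper:
    """Hypothetical wrapper sketching deferred, one-time GPU geometry setup."""

    def __init__(self, recon_model):
        self.recon_model = recon_model
        self._geometry_ready = False  # hypothetical guard; not in the actual commit

    def ensure_geometry(self, device="cuda:0", fovy=50.0):
        # Run the CUDA-dependent FlexiCubes initialization lazily, only once,
        # and only when CUDA is actually available (e.g. inside the request handler).
        if not self._geometry_ready and torch.cuda.is_available():
            self.recon_model.init_flexicubes_geometry(device, fovy=fovy)
            self._geometry_ready = True

Guarding the call this way would avoid rebuilding the geometry on every request while still keeping all CUDA work out of module import, which matters in environments where a GPU is only attached while a request is running.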