JiantaoLin committed
Commit 0762ab7 · 1 Parent(s): 90eb8de
Files changed (2)
  1. app.py +1 -1
  2. pipeline/kiss3d_wrapper.py +1 -2
app.py CHANGED
@@ -143,7 +143,7 @@ def save_py3dmesh_with_trimesh_fast(meshes, save_glb_path=TEMP_MESH_ADDRESS, app
     print(f"saving to {save_glb_path}")
 #
 #
-@spaces.GPU
+# @spaces.GPU
 def text_to_detailed(prompt, seed=None):
     # print(torch.cuda.is_available())
     # print(f"Before text_to_detailed: {torch.cuda.memory_allocated() / 1024**3} GB")
pipeline/kiss3d_wrapper.py CHANGED
@@ -256,8 +256,7 @@ class kiss3d_wrapper(object):
             caption_text = self.get_detailed_prompt(caption_text)
 
         return caption_text
-
-    @spaces.GPU
+
     def get_detailed_prompt(self, prompt, seed=None):
         if self.llm_model is not None:
            detailed_prompt = get_llm_response(self.llm_model, self.llm_tokenizer, prompt, seed=seed)
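
Both hunks touch the same mechanism: the `@spaces.GPU` decorator from Hugging Face's `spaces` package, which on ZeroGPU Spaces requests a GPU for the duration of each call to the decorated function. As a rough illustration only, the sketch below shows how such a decorator is typically attached; the function body is a hypothetical stand-in, not this repo's actual implementation.

# Minimal sketch of the ZeroGPU decorator this commit removes / comments out.
# Assumes it runs inside a Hugging Face Space with the `spaces` package
# installed; the body is hypothetical and not taken from app.py.
import spaces
import torch

@spaces.GPU  # on ZeroGPU Spaces, attaches a GPU for the duration of each call
def text_to_detailed(prompt, seed=None):
    # Hypothetical body: do some work on whatever device is available.
    if seed is not None:
        torch.manual_seed(seed)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"{prompt} (processed on {device})"

With the decorator removed or commented out, as in this commit, the two functions no longer request a ZeroGPU allocation when called and run in the Space's default process instead.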