LutaoJiang committed on
Commit bce015a · 1 Parent(s): 8a16430
Files changed (1)
  1. app.py +110 -132
app.py CHANGED
@@ -74,11 +74,12 @@ def check_gpu():
     os.environ['LD_LIBRARY_PATH'] = "/usr/local/cuda-12.1/lib64:" + os.environ.get('LD_LIBRARY_PATH', '')
     subprocess.run(['nvidia-smi']) # Test if CUDA is available
     print(f"torch.cuda.is_available:{torch.cuda.is_available()}")
-    print("Device count:", torch.cuda.device_count())
+    print("Device count:", torch.cuda.device_count())
+
     # test nvdiffrast
     import nvdiffrast.torch as dr
     dr.RasterizeCudaContext(device="cuda:0")
-    print("nvdiffrast initialized successfully")
 
 
 # Only check GPU in non-UI debug mode
@@ -163,137 +164,114 @@ def save_py3dmesh_with_trimesh_fast(meshes, save_glb_path=TEMP_MESH_ADDRESS, app
     fix_vert_color_glb(save_glb_path)
     print(f"saving to {save_glb_path}")
 
-# Create model function substitutes for debug mode
-def debug_text_to_detailed(prompt, seed=None):
-    print("DEBUG MODE: text_to_detailed was called")
-    return f"This is a detailed prompt example in debug mode: '{prompt}'"
-
-def debug_text_to_image(prompt, seed=None, strength=1.0,lora_scale=1.0, num_inference_steps=18, redux_hparam=None, init_image=None, **kwargs):
-    print("DEBUG MODE: text_to_image was called")
-    # Return an example image path or create a solid color image
-    example_image = Image.new('RGB', (512, 512), color=(73, 109, 137))
-    return example_image
-
-def debug_bundle_image_to_mesh(*args, **kwargs):
-    print("DEBUG MODE: bundle_image_to_mesh was called")
-    # Return example video and mesh paths
-    return "app_assets/logo_temp_.png", "app_assets/logo_temp_.png"
-
-# Select actual functionality or debug substitutes based on mode
-if UI_ONLY_MODE:
-    text_to_detailed = debug_text_to_detailed
-    text_to_image = debug_text_to_image
-    bundle_image_to_mesh = debug_bundle_image_to_mesh
-    print("UI debug mode functions loaded")
-else:
-    @spaces.GPU
-    def text_to_detailed(prompt, seed=None):
-        # test nvdiffrast
-        import nvdiffrast.torch as dr
-        dr.RasterizeCudaContext(device="cuda:0")
-        print("nvdiffrast initialized successfully")
-
-        print(f"torch.cuda.is_available():{torch.cuda.is_available()}")
-        # print(f"Before text_to_detailed: {torch.cuda.memory_allocated() / 1024**3} GB")
-        return k3d_wrapper.get_detailed_prompt(prompt, seed)
-
-    @spaces.GPU(duration=120)
-    def text_to_image(prompt, seed=None, strength=1.0,lora_scale=1.0, num_inference_steps=18, redux_hparam=None, init_image=None, **kwargs):
-        # subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
-        # print(f"Before text_to_image: {torch.cuda.memory_allocated() / 1024**3} GB")
-        # k3d_wrapper.flux_pipeline.enable_xformers_memory_efficient_attention()
-        k3d_wrapper.renew_uuid()
-        init_image = None
-        # if init_image_path is not None:
-        # init_image = Image.open(init_image_path)
-        subprocess.run(['nvidia-smi']) # Test if CUDA is available
-        with torch.no_grad():
-            result = k3d_wrapper.generate_3d_bundle_image_text(
-                prompt,
-                image=init_image,
-                strength=strength,
-                lora_scale=lora_scale,
-                num_inference_steps=num_inference_steps,
-                seed=int(seed) if seed is not None else None,
-                redux_hparam=redux_hparam,
-                save_intermediate_results=True,
-                **kwargs)
-        return result[-1]
-
-    @spaces.GPU(duration=120)
-    def image2mesh_preprocess_(input_image_, seed, use_mv_rgb=True):
-        global preprocessed_input_image
-
-        seed = int(seed) if seed is not None else None
-
-        # TODO: delete this later
-        # k3d_wrapper.del_llm_model()
-
-        input_image_save_path, reference_save_path, caption = image2mesh_preprocess(k3d_wrapper, input_image_, seed, use_mv_rgb)
-
-        preprocessed_input_image = Image.open(input_image_save_path)
-        return reference_save_path, caption
-
-
-    @spaces.GPU(duration=120)
-    def image2mesh_main_(reference_3d_bundle_image, caption, seed, strength1=0.5, strength2=0.95, enable_redux=True, use_controlnet=True, if_video=True):
-        subprocess.run(['nvidia-smi'])
-        global mesh_cache
-        seed = int(seed) if seed is not None else None
-
-
-        # TODO: delete this later
-        # k3d_wrapper.del_llm_model()
-
-        input_image = preprocessed_input_image
-
-        reference_3d_bundle_image = torch.tensor(reference_3d_bundle_image).permute(2,0,1)/255
-
-        gen_save_path, recon_mesh_path = image2mesh_main(k3d_wrapper, input_image, reference_3d_bundle_image, caption=caption, seed=seed, strength1=strength1, strength2=strength2, enable_redux=enable_redux, use_controlnet=use_controlnet)
-        mesh_cache = recon_mesh_path
-
-
-        if if_video:
-            video_path = recon_mesh_path.replace('.obj','.mp4').replace('.glb','.mp4')
-            render_video_from_obj(recon_mesh_path, video_path)
-            print(f"After bundle_image_to_mesh: {torch.cuda.memory_allocated() / 1024**3} GB")
-            return gen_save_path, video_path, mesh_cache
-        else:
-            return gen_save_path, recon_mesh_path, mesh_cache
-        # return gen_save_path, recon_mesh_path
-
-    @spaces.GPU(duration=120)
-    def bundle_image_to_mesh(
-        gen_3d_bundle_image,
-        camera_radius=3.5,
-        lrm_radius = 3.5,
-        isomer_radius = 4.2,
-        reconstruction_stage1_steps = 0,
-        reconstruction_stage2_steps = 50,
-        save_intermediate_results=False
-    ):
-        global mesh_cache
-        print(f"Before bundle_image_to_mesh: {torch.cuda.memory_allocated() / 1024**3} GB")
-        k3d_wrapper.recon_model.init_flexicubes_geometry("cuda:0", fovy=50.0)
-        print(f"init_flexicubes_geometry done")
-        # TODO: delete this later
-        k3d_wrapper.del_llm_model()
-
-        print(f"Before bundle_image_to_mesh after deleting llm model: {torch.cuda.memory_allocated() / 1024**3} GB")
-
-        gen_3d_bundle_image = torch.tensor(gen_3d_bundle_image).permute(2,0,1)/255
-
-        recon_mesh_path = k3d_wrapper.reconstruct_3d_bundle_image(gen_3d_bundle_image, camera_radius=camera_radius, lrm_render_radius=lrm_radius, isomer_radius=isomer_radius, save_intermediate_results=save_intermediate_results, reconstruction_stage1_steps=int(reconstruction_stage1_steps), reconstruction_stage2_steps=int(reconstruction_stage2_steps))
-        mesh_cache = recon_mesh_path
-
-        print(f"Mesh generated at: {mesh_cache}")
-
-        # Check if file exists
-        if not os.path.exists(mesh_cache):
-            print(f"Warning: Generated mesh file does not exist: {mesh_cache}")
-            return None, mesh_cache
-
-        return recon_mesh_path, mesh_cache
+@spaces.GPU
+def text_to_detailed(prompt, seed=None):
+    # test nvdiffrast
+    import nvdiffrast.torch as dr
+    dr.RasterizeCudaContext(device="cuda:0")
+    print("nvdiffrast initialized successfully")
+
+    print(f"torch.cuda.is_available():{torch.cuda.is_available()}")
+    # print(f"Before text_to_detailed: {torch.cuda.memory_allocated() / 1024**3} GB")
+    return k3d_wrapper.get_detailed_prompt(prompt, seed)
+
+@spaces.GPU(duration=120)
+def text_to_image(prompt, seed=None, strength=1.0,lora_scale=1.0, num_inference_steps=18, redux_hparam=None, init_image=None, **kwargs):
+    # subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
+    # print(f"Before text_to_image: {torch.cuda.memory_allocated() / 1024**3} GB")
+    # k3d_wrapper.flux_pipeline.enable_xformers_memory_efficient_attention()
+    k3d_wrapper.renew_uuid()
+    init_image = None
+    # if init_image_path is not None:
+    # init_image = Image.open(init_image_path)
+    subprocess.run(['nvidia-smi']) # Test if CUDA is available
+    with torch.no_grad():
+        result = k3d_wrapper.generate_3d_bundle_image_text(
+            prompt,
+            image=init_image,
+            strength=strength,
+            lora_scale=lora_scale,
+            num_inference_steps=num_inference_steps,
+            seed=int(seed) if seed is not None else None,
+            redux_hparam=redux_hparam,
+            save_intermediate_results=True,
+            **kwargs)
+    return result[-1]
+
+@spaces.GPU(duration=120)
+def image2mesh_preprocess_(input_image_, seed, use_mv_rgb=True):
+    global preprocessed_input_image
+
+    seed = int(seed) if seed is not None else None
+
+    # TODO: delete this later
+    # k3d_wrapper.del_llm_model()
+
+    input_image_save_path, reference_save_path, caption = image2mesh_preprocess(k3d_wrapper, input_image_, seed, use_mv_rgb)
+
+    preprocessed_input_image = Image.open(input_image_save_path)
+    return reference_save_path, caption
+
+
+@spaces.GPU(duration=120)
+def image2mesh_main_(reference_3d_bundle_image, caption, seed, strength1=0.5, strength2=0.95, enable_redux=True, use_controlnet=True, if_video=True):
+    subprocess.run(['nvidia-smi'])
+    global mesh_cache
+    seed = int(seed) if seed is not None else None
+
+
+    # TODO: delete this later
+    # k3d_wrapper.del_llm_model()
+
+    input_image = preprocessed_input_image
+
+    reference_3d_bundle_image = torch.tensor(reference_3d_bundle_image).permute(2,0,1)/255
+
+    gen_save_path, recon_mesh_path = image2mesh_main(k3d_wrapper, input_image, reference_3d_bundle_image, caption=caption, seed=seed, strength1=strength1, strength2=strength2, enable_redux=enable_redux, use_controlnet=use_controlnet)
+    mesh_cache = recon_mesh_path
+
+
+    if if_video:
+        video_path = recon_mesh_path.replace('.obj','.mp4').replace('.glb','.mp4')
+        render_video_from_obj(recon_mesh_path, video_path)
+        print(f"After bundle_image_to_mesh: {torch.cuda.memory_allocated() / 1024**3} GB")
+        return gen_save_path, video_path, mesh_cache
+    else:
+        return gen_save_path, recon_mesh_path, mesh_cache
+    # return gen_save_path, recon_mesh_path
+
+@spaces.GPU(duration=120)
+def bundle_image_to_mesh(
+    gen_3d_bundle_image,
+    camera_radius=3.5,
+    lrm_radius = 3.5,
+    isomer_radius = 4.2,
+    reconstruction_stage1_steps = 0,
+    reconstruction_stage2_steps = 50,
+    save_intermediate_results=False
+):
+    global mesh_cache
+    print(f"Before bundle_image_to_mesh: {torch.cuda.memory_allocated() / 1024**3} GB")
+    k3d_wrapper.recon_model.init_flexicubes_geometry("cuda:0", fovy=50.0)
+    print(f"init_flexicubes_geometry done")
+    # TODO: delete this later
+    k3d_wrapper.del_llm_model()
+
+    print(f"Before bundle_image_to_mesh after deleting llm model: {torch.cuda.memory_allocated() / 1024**3} GB")
+
+    gen_3d_bundle_image = torch.tensor(gen_3d_bundle_image).permute(2,0,1)/255
+
+    recon_mesh_path = k3d_wrapper.reconstruct_3d_bundle_image(gen_3d_bundle_image, camera_radius=camera_radius, lrm_render_radius=lrm_radius, isomer_radius=isomer_radius, save_intermediate_results=save_intermediate_results, reconstruction_stage1_steps=int(reconstruction_stage1_steps), reconstruction_stage2_steps=int(reconstruction_stage2_steps))
+    mesh_cache = recon_mesh_path
+
+    print(f"Mesh generated at: {mesh_cache}")
+
+    # Check if file exists
+    if not os.path.exists(mesh_cache):
+        print(f"Warning: Generated mesh file does not exist: {mesh_cache}")
+        return None, mesh_cache
+
+    return recon_mesh_path, mesh_cache
 
 # _HEADER_=f"""
 # <img src="{LOGO_PATH}">