QZFantasies committed on
Commit e505fbf · 1 Parent(s): c57ef32
Files changed (1)
  1. app.py +25 -27
app.py CHANGED
@@ -589,39 +589,20 @@ def demo_lhm(pose_estimator, face_detector, lhm, cfg):
 
         os.makedirs(os.path.dirname(dump_video_path), exist_ok=True)
 
+        return rgb, dump_video_path
+
+    def core_fn_export(image, video_params, working_dir):
+        rgb, dump_video_path = core_fn(image=image, video_params=video_params, working_dir=working_dir)
+
         images_to_video(
             rgb,
             output_path=dump_video_path,
-            fps=render_fps,
+            fps=30,
             gradio_codec=False,
             verbose=True,
         )
-
-        # self.infer_single(
-        # image_path,
-        # motion_seqs_dir=motion_seqs_dir,
-        # motion_img_dir=None,
-        # motion_video_read_fps=30,
-        # export_video=False,
-        # export_mesh=False,
-        # dump_tmp_dir=dump_image_dir,
-        # dump_image_dir=dump_image_dir,
-        # dump_video_path=dump_video_path,
-        # shape_param=shape_pose.beta,
-        # )
-
-        # status = spaces.GPU(infer_impl(
-        # gradio_demo_image=image_raw,
-        # gradio_motion_file=smplx_params_dir,
-        # gradio_masked_image=dump_image_path,
-        # gradio_video_save_path=dump_video_path
-        # ))
-
+
         return dump_image_path, dump_video_path
-        # if status:
-        # return dump_image_path, dump_video_path
-        # else:
-        # return None, None
 
     _TITLE = '''LHM: Large Animatable Human Model'''
 
@@ -643,6 +624,23 @@ def demo_lhm(pose_estimator, face_detector, lhm, cfg):
             </div>
             """
         )
+        gr.Markdown(
+            """
+            <p align="center">
+            <a title="Website" href="https://lingtengqiu.github.io/LHM/" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+                <img src="https://www.obukhov.ai/img/badges/badge-website.svg">
+            </a>
+            <a title="arXiv" href="https://arxiv.org/pdf/2503.10625" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+                <img src="https://www.obukhov.ai/img/badges/badge-pdf.svg">
+            </a>
+            <a title="Github" href="https://github.com/aigc3d/LHM" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+                <img src="https://img.shields.io/github/stars/aigc3d/LHM?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars">
+            </a>
+            <a title="Video" href="https://www.youtube.com/watch?v=tivEpz_yiEo" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
+                <img src="https://img.shields.io/badge/YouTube-QiuLingteng-red?logo=youtube" alt="Video">
+            </a>
+            """
+        )
         gr.HTML(
             """<p><h4 style="color: red;"> Notes: Please input full-body image in case of detection errors. We simplify the pipeline in spaces: 1) using Rembg instead of SAM2; 2) limit the output video length to 10s; For best visual quality, try the inference code on Github instead.</h4></p>"""
         )
@@ -730,7 +728,7 @@ def demo_lhm(pose_estimator, face_detector, lhm, cfg):
             outputs=[working_dir],
             queue=False,
         ).success(
-            fn=core_fn,
+            fn=core_fn_export,
             inputs=[input_image, video_input, working_dir],  # video_params refer to smpl dir
             outputs=[processed_image, output_video],
         )
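The first hunk splits rendering from encoding: core_fn now stops after producing the rendered frames and returns them together with the target video path, while the new core_fn_export wrapper calls it and runs the images_to_video encoding at a fixed 30 fps (previously render_fps). Below is a minimal sketch of the resulting structure; the stub bodies, the images_to_video signature, and the path handling are assumptions for illustration, not the full app.py.

```python
# Sketch of the refactor in this commit (not the full app.py).
# The images_to_video signature and the stub bodies are assumed for illustration.
import os


def images_to_video(frames, output_path, fps, gradio_codec=False, verbose=True):
    # Stand-in for the repo's helper that encodes a frame sequence to a video file.
    pass


def core_fn(image, video_params, working_dir):
    # Stand-in for the real pipeline: preprocess the image, apply the motion
    # sequence, and render the animation frames.
    rgb = []  # rendered frames (placeholder)
    dump_video_path = os.path.join(working_dir, "output.mp4")
    os.makedirs(os.path.dirname(dump_video_path), exist_ok=True)
    # After this commit, core_fn stops here and hands the frames to the caller
    # instead of encoding the video itself.
    return rgb, dump_video_path


def core_fn_export(image, video_params, working_dir):
    # New wrapper: run the core pipeline, then encode at a fixed 30 fps
    # (the old in-place call used fps=render_fps).
    rgb, dump_video_path = core_fn(image=image, video_params=video_params, working_dir=working_dir)
    images_to_video(rgb, output_path=dump_video_path, fps=30, gradio_codec=False, verbose=True)
    # In app.py this also returns the masked input image path (dump_image_path),
    # which is assumed to come from the enclosing scope.
    return dump_video_path
```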
 
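The last hunk only swaps the callback in the existing Gradio event chain: the submit button still runs the working-directory preparation step first and, on success, passes the same three inputs to the new export wrapper. Below is a self-contained sketch of that wiring pattern; the component constructors and the prepare_working_dir stub are assumptions mirroring the diff, not the actual layout code in app.py.

```python
# Sketch of the event wiring touched by the last hunk; component definitions and
# prepare_working_dir are simplified stand-ins, not the real app.py layout.
import tempfile

import gradio as gr


def prepare_working_dir():
    # Stand-in for the first-stage callback: allocate a scratch directory.
    return tempfile.mkdtemp()


def core_fn_export(image, video_params, working_dir):
    # Stand-in for the second-stage callback (see the previous sketch): returns
    # the processed image and the rendered video path.
    return image, None


with gr.Blocks() as demo:
    input_image = gr.Image(type="filepath")
    video_input = gr.Video()   # refers to the motion / SMPL-X params, per the comment in app.py
    working_dir = gr.State()
    processed_image = gr.Image()
    output_video = gr.Video()
    submit = gr.Button("Submit")

    # Same chaining pattern as in the diff: the .success() handler runs only
    # if the first callback completed without raising.
    submit.click(
        fn=prepare_working_dir,
        inputs=[],
        outputs=[working_dir],
        queue=False,
    ).success(
        fn=core_fn_export,
        inputs=[input_image, video_input, working_dir],
        outputs=[processed_image, output_video],
    )
```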