Spaces: Running on Zero

Commit · a8dfdef
Parent(s): 388f445
update notes and limit time

Files changed:
- LHM/utils/ffmpeg_utils.py +6 -4
- app.py +1 -1
LHM/utils/ffmpeg_utils.py  CHANGED
@@ -46,13 +46,15 @@ def images_to_video(images, output_path, fps, gradio_codec: bool, verbose=False,
                 f"Frame shape mismatch: {frame.shape} vs {images.shape}"
             assert frame.min() >= 0 and frame.max() <= 255, \
                 f"Frame value out of range: {frame.min()} ~ {frame.max()}"
-
         else:
             frame = images[i]
-        width, height = frame.shape[1], frame.shape[0]
+        # width, height = frame.shape[1], frame.shape[0]
         # reshape to limit the export time
-        if width > 1200 or height > 1200 or images.shape[0] > 400:
-            frames.append(cv2.resize(frame, (width // 2, height // 2)))
+        # if width > 1200 or height > 1200 or images.shape[0] > 400:
+        #     frames.append(cv2.resize(frame, (width // 2, height // 2)))
+
+        # limit the frames directly @NOTE huggingface only!
+        frames = frames[:300]
 
     frames = np.stack(frames)
     iio.imwrite(output_path,frames,fps=fps,codec="libx264",pixelformat="yuv420p",bitrate=bitrate,macro_block_size=16)
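For context, a minimal sketch of what the new cap does. The function name write_capped_video, the MAX_FRAMES constant, and the 30 fps default are illustrative and not taken from the repo; only the slicing and the final iio.imwrite call mirror the code above. Truncating the frame list before np.stack bounds both encoding time and clip length, and at 30 fps 300 frames comes out to the 10 s limit mentioned in the app.py note below (the actual length depends on the fps passed to images_to_video).

    # Illustrative sketch only, not the Space's exact code.
    import numpy as np
    import imageio.v3 as iio  # assumed: same iio alias as in ffmpeg_utils.py

    MAX_FRAMES = 300  # the commit hard-codes 300 inline; named here for clarity

    def write_capped_video(frames, output_path, fps=30, bitrate="10M"):
        """Encode at most MAX_FRAMES frames (about 10 s at 30 fps)."""
        frames = list(frames)[:MAX_FRAMES]  # same effect as `frames = frames[:300]`
        frames = np.stack(frames)           # (N, H, W, 3) uint8 array
        iio.imwrite(output_path, frames, fps=fps, codec="libx264",
                    pixelformat="yuv420p", bitrate=bitrate, macro_block_size=16)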
app.py  CHANGED
@@ -644,7 +644,7 @@ def demo_lhm(pose_estimator, face_detector, lhm, cfg):
             """
         )
         gr.HTML(
-            """<p><h4 style="color: red;"> Notes: Please input full-body image in case of detection errors.</h4></p>"""
+            """<p><h4 style="color: red;"> Notes: Please input full-body image in case of detection errors. We simplify the pipeline in spaces: 1) using Rembg instead of SAM2; 2) limit the output video length to 10s; For best visual quality, try the inference code on Github instead.</h4></p>"""
        )
 
        # DISPLAY
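The updated note says the Space uses Rembg instead of SAM2 to separate the subject from the background. As a rough illustration only (not the Space's actual code, and the file names are made up), Rembg's high-level API removes the background from a single image like this:

    # Illustrative sketch of Rembg-based background removal; paths are hypothetical.
    from PIL import Image
    from rembg import remove

    person = Image.open("full_body_input.jpg")   # hypothetical input image
    matted = remove(person)                      # RGBA output with background removed
    matted.save("person_rgba.png")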