import gradio as gr
from videopose_PSTMO import gr_video2mc
import os
# ffmpeg -i input_videos/kun_1280x720_30fps_0-14_0-32.mp4 -vf trim=0:5,setpts=PTS-STARTPTS input_videos/kun_test_5sec.mp4
# ffmpeg -i input.mp4 -vf scale=320:-1 output.mp4

def Video2MC(video, progress=gr.Progress(track_tqdm=True)):
    # Run the video -> MC animation pipeline; tqdm progress inside gr_video2mc is
    # surfaced in the UI via track_tqdm. The path is returned twice (File + Text outputs).
    progress(1.0, desc="Step 0: Starting")
    output_path, output_video = gr_video2mc(video, progress)
    return output_path, output_path, output_video

with gr.Blocks() as iface:
    # Page header: project logo image (the title heading is currently commented out).
    text1 = gr.Markdown(
        """
        <div align=center>
        <img src="https://github.com/Balloon-356/Video2MC/assets/114230565/2622c7b7-7b5d-458c-bd9a-dc0be37af370" />
        """
        # </div>
        # <h1 align="center">Video2MC: Automatic MC animation generation based on 3D human pose estimation</h1>
    )
    with gr.Row():
        with gr.Column():
            # Input side: video upload, Clear/Submit buttons, and a bundled example clip.
            input_video = gr.Video()
            with gr.Row():
                btn_c = gr.ClearButton(input_video)
                btn_s = gr.Button("Submit", variant='primary')
            gr.Examples([os.path.join(os.path.dirname(__file__),
                                      "input_videos/kun_test_5sec.mp4")], input_video)
        with gr.Column():
            # Output side: the generated file, its path as text, and a preview video.
            output_miframes = gr.File()
            output_path = gr.Text()
            output_video = gr.Video()

    btn_s.click(Video2MC, inputs=[input_video], outputs=[output_miframes, output_path, output_video])

# Allow up to 10 jobs from the request queue to run concurrently.
iface.queue(concurrency_count=10).launch()