import os
import subprocess

import gradio as gr
from huggingface_hub import snapshot_download

# Fetch the Stable Diffusion base model and the ControlNet condition models
# into local checkpoints/ folders.
model_ids = [
    'runwayml/stable-diffusion-v1-5',
    'lllyasviel/sd-controlnet-depth',
    'lllyasviel/sd-controlnet-canny',
    'lllyasviel/sd-controlnet-openpose',
]
for model_id in model_ids:
    model_name = model_id.split('/')[-1]
    snapshot_download(model_id, local_dir=f'checkpoints/{model_name}')
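
# run_inference below references a get_video_properties helper in a commented-out
# block. A minimal sketch of such a helper is given here; it is an assumption that
# OpenCV (cv2) is available, and the demo never actually calls it.
def get_video_properties(video_path):
    import cv2  # assumed dependency; imported lazily so the app loads without it
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    cap.release()
    duration = frame_count / fps if fps else 0.0
    return duration, fps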

def run_inference(prompt, video_path, condition, video_length):
    # Optionally derive fps from the uploaded clip to convert a length in
    # seconds into a frame count (disabled in this demo):
    # duration, fps = get_video_properties(video_path)
    # video_length = int(video_length * fps)
    output_path = 'output/'
    os.makedirs(output_path, exist_ok=True)
    # Run the ControlNet-conditioned generation script with the chosen condition.
    command = (
        f"python inference.py --prompt '{prompt}' --condition '{condition}' "
        f"--video_path '{video_path}' --output_path '{output_path}' "
        f"--video_length {video_length} --smoother_steps 19 20"
    )
    subprocess.run(command, shell=True)
    # inference.py writes the result as <prompt>.mp4 inside the output folder.
    video_path_output = os.path.join(output_path, f"{prompt}.mp4")
    return "done", video_path_output
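
# Alternative sketch (hypothetical, not wired into the UI below): passing the
# arguments as a list avoids shell-quoting problems when the prompt contains
# apostrophes or spaces. It assumes inference.py accepts the same flags used above.
def run_inference_args(prompt, video_path, condition, video_length):
    output_path = 'output/'
    os.makedirs(output_path, exist_ok=True)
    subprocess.run([
        "python", "inference.py",
        "--prompt", prompt,
        "--condition", condition,
        "--video_path", video_path,
        "--output_path", output_path,
        "--video_length", str(video_length),
        "--smoother_steps", "19", "20",
    ])
    return "done", os.path.join(output_path, f"{prompt}.mp4")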

with gr.Blocks() as demo:
    with gr.Column():
        prompt = gr.Textbox(label="prompt")
        video_path = gr.Video(source="upload", type="filepath")
        condition = gr.Textbox(label="Condition", value="depth")
        video_length = gr.Slider(label="video length", minimum=1, maximum=15, step=1, value=2)
        # seed = gr.Number(label="seed", value=42)
        submit_btn = gr.Button("Submit")
        video_res = gr.Video(label="result")
        status = gr.Textbox(label="status")
        submit_btn.click(fn=run_inference,
                         inputs=[prompt, video_path, condition, video_length],
                         outputs=[status, video_res])

demo.queue(max_size=12).launch()