import os
import subprocess

import gradio as gr
from huggingface_hub import snapshot_download
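
# Base Stable Diffusion model plus the ControlNet checkpoints, downloaded from the Hub at startup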
model_ids = [
    'runwayml/stable-diffusion-v1-5',
    'lllyasviel/sd-controlnet-depth',
    'lllyasviel/sd-controlnet-canny',
    'lllyasviel/sd-controlnet-openpose',
]
for model_id in model_ids:
    model_name = model_id.split('/')[-1]
    snapshot_download(model_id, local_dir=f'checkpoints/{model_name}')
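
# Run the inference.py script in a subprocess with the user's settings and report when it finishes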
def run_inference(prompt, video_path, condition, video_length):
    # The slider value may arrive as a float; the CLI expects an integer frame count
    video_length = int(video_length)
    # Interpolate the user inputs into the command instead of passing the literal argument names
    command = (
        f"python inference.py --prompt '{prompt}' --condition {condition} "
        f"--video_path '{video_path}' --output_path 'outputs/' "
        f"--video_length {video_length} --smoother_steps 19 20"
    )
    output = subprocess.check_output(command, shell=True, text=True)
    output = output.strip()  # Remove any leading/trailing whitespace
    # Process the output as needed
    print("Command output:", output)
    return "done"
    #return f"{output_path}/{prompt}.mp4"
with gr.Blocks() as demo:
    with gr.Column():
        prompt = gr.Textbox(label="prompt")
        video_path = gr.Video(source="upload", type="filepath")
        condition = gr.Textbox(label="Condition", value="depth")
        video_length = gr.Slider(label="video length", minimum=1, maximum=15, step=1, value=2)
        #seed = gr.Number(label="seed", value=42)
        submit_btn = gr.Button("Submit")
        #video_res = gr.Video(label="result")
        video_res = gr.Textbox(label="result")
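        # Wire the button to run_inference; the result textbox only reports completion for now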
        submit_btn.click(fn=run_inference,
                         inputs=[prompt,
                                 video_path,
                                 condition,
                                 video_length],
                         outputs=[video_res])

demo.queue(max_size=12).launch()