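# Gradio demo: download the Stable Diffusion base model and the ControlNet
# checkpoints, then run inference.py on an uploaded video with the chosen
# conditioning (depth / canny / openpose).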
import os
import subprocess

import gradio as gr
from huggingface_hub import snapshot_download

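# Fetch the base Stable Diffusion weights and the three ControlNet
# checkpoints into local checkpoints/<model_name> directories at startup.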
model_ids = [
    'runwayml/stable-diffusion-v1-5',
    'lllyasviel/sd-controlnet-depth', 
    'lllyasviel/sd-controlnet-canny', 
    'lllyasviel/sd-controlnet-openpose',
]
for model_id in model_ids:
    model_name = model_id.split('/')[-1]
    snapshot_download(model_id, local_dir=f'checkpoints/{model_name}')


def run_inference(prompt, video_path, condition, video_length):
    output_path = 'output/'
    os.makedirs(output_path, exist_ok=True)

    # Run the inference script as a subprocess. Passing the arguments as a
    # list (rather than an interpolated shell string) keeps prompts that
    # contain quotes or spaces from breaking the command.
    command = [
        "python", "inference.py",
        "--prompt", prompt,
        "--condition", condition,
        "--video_path", video_path,
        "--output_path", output_path,
        "--video_length", str(video_length),
        "--smoother_steps", "19", "20",
    ]
    subprocess.run(command)

    # inference.py names the output file after the prompt.
    video_path_output = os.path.join(output_path, f"{prompt}.mp4")
    return "done", video_path_output

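# Minimal Blocks UI: prompt box, video upload, conditioning type, and a
# video-length slider, wired to run_inference via the Submit button.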
with gr.Blocks() as demo:
    with gr.Column():
        prompt = gr.Textbox(label="prompt")
        video_path = gr.Video(source="upload", type="filepath")
        condition = gr.Textbox(label="Condition", value="depth")
        video_length = gr.Slider(label="video length", minimum=1, maximum=15, step=1, value=2)
        #seed = gr.Number(label="seed", value=42)
        submit_btn = gr.Button("Submit")
        video_res = gr.Video(label="result")
        status = gr.Textbox(label="status")

    submit_btn.click(fn=run_inference, 
                     inputs=[prompt,
                             video_path,
                             condition,
                             video_length
                            ],
                    outputs=[status, video_res])

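# Limit the request queue to 12 pending jobs and launch the app.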
demo.queue(max_size=12).launch()