#!/usr/bin/env python

import os
import pathlib
import tempfile

import gradio as gr
import torch
from huggingface_hub import snapshot_download
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline

DESCRIPTION = """# I2VGen-XL

I2VGen-XL generates videos that are semantically consistent with the input image and text. The generated videos are high-definition (1280×720), widescreen (16:9), temporally coherent, and have good texture.
"""

if torch.cuda.is_available():
    # Model weights are cached locally; MODEL_CACHE_DIR can override the default location.
    model_cache_dir = os.getenv("MODEL_CACHE_DIR", "./models")

    # Stage 1 (image-to-video): download the MS-Image2Video weights from the
    # Hugging Face Hub (a no-op if already cached) and build the ModelScope
    # pipeline on the first GPU.
    image2video_model_dir = pathlib.Path(model_cache_dir) / "MS-Image2Video"
    snapshot_download(repo_id="damo-vilab/MS-Image2Video", repo_type="model", local_dir=image2video_model_dir)
    image_to_video_pipe = pipeline(
        task="image-to-video", model=image2video_model_dir.as_posix(), model_revision="v1.1.0", device="cuda:0"
    )

    # Stage 2 (video-to-video): MS-Vid2Vid-XL upscales the first stage's output
    # into a high-resolution clip, guided by a text description.
    video2video_model_dir = pathlib.Path(model_cache_dir) / "MS-Vid2Vid-XL"
    snapshot_download(repo_id="damo-vilab/MS-Vid2Vid-XL", repo_type="model", local_dir=video2video_model_dir)
    video_to_video_pipe = pipeline(
        task="video-to-video", model=video2video_model_dir.as_posix(), model_revision="v1.1.0", device="cuda:0"
    )
else:
    # Both pipelines require CUDA; leave them unset so the UI can still be
    # rendered (and the functions below report an error) on CPU-only machines.
    image_to_video_pipe = None
    video_to_video_pipe = None
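

# Small guard added for readability (not in the original script): surface a
# user-visible Gradio error instead of a TypeError when the pipelines were
# never loaded, e.g. on a machine without a CUDA GPU.
def _require_pipe(pipe, name: str):
    if pipe is None:
        raise gr.Error(f"{name} is unavailable: this demo requires a CUDA GPU.")
    return pipe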


def image_to_video(image_path: str) -> str:
    pipe = _require_pipe(image_to_video_pipe, "Image-to-video")
    # The pipeline writes the generated clip to `output_video` and also returns
    # its path under OutputKeys.OUTPUT_VIDEO.
    output_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    return pipe(image_path, output_video=output_file.name)[OutputKeys.OUTPUT_VIDEO]


def video_to_video(video_path: str, text: str) -> str:
    pipe = _require_pipe(video_to_video_pipe, "Video-to-video")
    # This pipeline expects a dict carrying the low-resolution video and its
    # English text description.
    p_input = {"video_path": video_path, "text": text}
    output_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    return pipe(p_input, output_video=output_file.name)[OutputKeys.OUTPUT_VIDEO]


with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Box():
        gr.Markdown('Step 1: Upload an image and click the "Generate video" button.')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input image", type="filepath", height=300)
                i2v_button = gr.Button("Generate video")
            with gr.Column():
                output_video_1 = gr.Video(label="Output video 1", interactive=False, height=300)
    with gr.Box():
        gr.Markdown(
            'Step 2: Add an English text description of the video content and click the "Generate high-resolution video" button.'
        )
        with gr.Row():
            with gr.Column():
                text_description = gr.Textbox(label="Text description")
                v2v_button = gr.Button("Generate high-resolution video")
            with gr.Column():
                output_video_2 = gr.Video(label="Output video 2", height=300)

    i2v_button.click(
        fn=image_to_video,
        inputs=input_image,
        outputs=output_video_1,
        api_name="image-to-video",
    )
    v2v_button.click(
        fn=video_to_video,
        inputs=[output_video_1, text_description],
        outputs=output_video_2,
        api_name="video-to-video",
    )

if __name__ == "__main__":
    demo.queue(max_size=10, api_open=False).launch()
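
# A minimal client-side sketch (an assumption, not part of this app): with the
# demo running at the default http://127.0.0.1:7860, the two named endpoints
# can be driven from the separate `gradio_client` package, e.g.:
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   low_res = client.predict("example.jpg", api_name="/image-to-video")
#   high_res = client.predict(low_res, "a description of the video", api_name="/video-to-video")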