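# Gradio demo: animate a still image into a short GIF with I2VGen-XL
# (ali-vilab/i2vgen-xl) using the diffusers I2VGenXLPipeline. Intended to run
# on a Hugging Face Space with GPU access, hence the `spaces.GPU` decorator below.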
import gradio as gr
import torch
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif, load_image
import tempfile
import spaces

# Cache the pipeline at module scope so the multi-gigabyte model is loaded
# only once instead of on every generation request.
_pipeline = None

def initialize_pipeline():
    global _pipeline

    # Check if CUDA is available and set the device (the fp16 weights effectively need a GPU)
    device = "cuda" if torch.cuda.is_available() else "cpu"

    if _pipeline is None:
        # Load the fp16 variant of I2VGen-XL from the Hugging Face Hub
        _pipeline = I2VGenXLPipeline.from_pretrained(
            "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"
        )
    _pipeline.to(device)
    return _pipeline, device

# Decorate the inference function (not just initialization) so the GPU is
# attached for the full generation call.
@spaces.GPU
def generate_gif(prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed):
    # Fetch the cached pipeline and the device it was moved to
    pipeline, device = initialize_pipeline()

    # Seed the generator; Gradio Number inputs arrive as floats, so cast to int
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # I2VGen-XL is an image-to-video model, so an input image is required
    if image is not None:
        image = load_image(image).convert("RGB")
        frames = pipeline(
            prompt=prompt,
            image=image,
            num_inference_steps=int(num_inference_steps),
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            generator=generator
        ).frames[0]
    else:
        raise gr.Error("Please upload an input image; I2VGen-XL cannot generate from a text prompt alone.")

    # Export to GIF
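    # delete=False keeps the temporary file on disk so Gradio can serve it after this handler returns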
    with tempfile.NamedTemporaryFile(delete=False, suffix=".gif") as tmp_gif:
        gif_path = tmp_gif.name
        export_to_gif(frames, gif_path)

    return gif_path

# Create the Gradio interface with tabs
with gr.Blocks() as demo:
    with gr.TabItem("Generate from Image and Text"):
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
                image = gr.Image(type="filepath", label="Input Image (required)")
                negative_prompt = gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt")
                num_inference_steps = gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps")
                guidance_scale = gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale")
                seed = gr.Number(label="Seed", value=8888)
                generate_button = gr.Button("Generate GIF")

            with gr.Column():
                output_gif = gr.Image(label="Generated GIF")

        generate_button.click(
            fn=generate_gif,
            inputs=[prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed],
            outputs=output_gif
        )

# Launch the interface
demo.launch()