import tempfile

import gradio as gr
import spaces
import torch
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif, load_image


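# Model loading lives in its own helper so the pipeline can be created on demand.
# On ZeroGPU Spaces the GPU is only attached inside functions decorated with
# @spaces.GPU, so that decorator belongs on the inference function below rather
# than on the loader.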
def initialize_pipeline():
    # Load the I2VGen-XL image-to-video pipeline in fp16; the weights are cached
    # locally after the first download.
    pipeline = I2VGenXLPipeline.from_pretrained(
        "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"
    )
    return pipeline


@spaces.GPU
def generate_gif(prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed):
    pipeline = initialize_pipeline()
    pipeline.to("cuda")

    # gr.Number delivers a float, but manual_seed() expects an int.
    generator = torch.Generator().manual_seed(int(seed))

    if image is not None:
        image = load_image(image).convert("RGB")
        frames = pipeline(
            prompt=prompt,
            image=image,
            num_inference_steps=int(num_inference_steps),
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            generator=generator,
        ).frames[0]
    else:
        frames = pipeline(
            prompt=prompt,
            num_inference_steps=int(num_inference_steps),
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            generator=generator,
        ).frames[0]

    # Write the frames to a temporary .gif and hand its path back to the output component.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".gif") as tmp_gif:
        gif_path = tmp_gif.name
    export_to_gif(frames, gif_path)

    return gif_path


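# Gradio UI: two tabs share the same generate_gif handler, one for text-only
# prompts and one that also takes a conditioning image.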
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.TabItem("Generate from Text"):
            with gr.Row():
                with gr.Column():
                    text_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
                    text_negative_prompt = gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt")
                    text_num_inference_steps = gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps")
                    text_guidance_scale = gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale")
                    text_seed = gr.Number(label="Seed", value=8888)
                    text_generate_button = gr.Button("Generate GIF")

                with gr.Column():
                    text_output_video = gr.Video(label="Generated GIF")

            # An input list cannot contain a bare None, so a hidden State stands
            # in for the missing image on this tab.
            text_image_state = gr.State(None)

            text_generate_button.click(
                fn=generate_gif,
                inputs=[text_prompt, text_image_state, text_negative_prompt, text_num_inference_steps, text_guidance_scale, text_seed],
                outputs=text_output_video,
            )

        with gr.TabItem("Generate from Image"):
            with gr.Row():
                with gr.Column():
                    image_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
                    image_input = gr.Image(type="filepath", label="Input Image")
                    image_negative_prompt = gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt")
                    image_num_inference_steps = gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps")
                    image_guidance_scale = gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale")
                    image_seed = gr.Number(label="Seed", value=8888)
                    image_generate_button = gr.Button("Generate GIF")

                with gr.Column():
                    image_output_video = gr.Video(label="Generated GIF")

            image_generate_button.click(
                fn=generate_gif,
                inputs=[image_prompt, image_input, image_negative_prompt, image_num_inference_steps, image_guidance_scale, image_seed],
                outputs=image_output_video,
            )


demo.launch()