|
import tempfile

import gradio as gr
import spaces
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
|
|
|
|
|
# Prefer the GPU when one is visible; fall back to CPU otherwise.
# NOTE(review): the fp16 weights loaded below are intended for CUDA —
# on a CPU-only host most float16 ops are unsupported, so inference
# would likely fail there; confirm this Space always runs with a GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load Stable Video Diffusion (image-to-video, "XT" checkpoint) using the
# half-precision ("fp16") weight variant to reduce download size and VRAM,
# then move every sub-model of the pipeline onto the selected device.
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipeline.to(device)
|
|
|
@spaces.GPU(duration=120)
def generate_video(image_path, seed):
    """Generate a short video from a single input image with Stable Video Diffusion.

    Args:
        image_path: Filesystem path to the uploaded image (the Gradio Image
            component is configured with type="filepath"). May be None when
            the user clicks Generate without uploading anything.
        seed: Random seed for frame generation. Gradio's Number component
            delivers this as a float, so it is cast to int before use.

    Returns:
        Path to the exported MP4 file.

    Raises:
        gr.Error: If no image was uploaded.
    """
    if image_path is None:
        raise gr.Error("Please upload an image first.")

    image = load_image(image_path)
    # SVD-XT expects its native training resolution of 1024x576.
    image = image.resize((1024, 576))

    # torch.Generator.manual_seed requires an int; gr.Number yields a float,
    # so the original code raised a TypeError here.
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # decode_chunk_size limits how many latent frames are decoded at once,
    # trading throughput for lower peak VRAM.
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]

    # Write to a unique temp file so concurrent requests in a shared Space
    # don't clobber each other (the original reused a fixed "generated.mp4").
    output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    export_to_video(frames, output_video_path, fps=7)

    return output_video_path
|
|
|
|
|
# Build the single-page UI: an image upload and a seed box in, one video out.
image_input = gr.Image(type="filepath", label="Upload Image")
seed_input = gr.Number(label="Seed", value=42)
video_output = gr.Video(label="Generated Video")

iface = gr.Interface(
    fn=generate_video,
    inputs=[image_input, seed_input],
    outputs=video_output,
    title="Stable Video Diffusion",
    description="Generate a video from an uploaded image using Stable Video Diffusion.",
)

# Start the Gradio server (blocking call).
iface.launch()