import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from PIL import Image
import numpy as np
from moviepy import ImageSequenceClip
import spaces

# Load the pipeline in half precision; model CPU offload keeps peak GPU
# memory low by moving submodules onto the GPU only while they are needed
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipeline.enable_model_cpu_offload()

@spaces.GPU  # ZeroGPU: allocate a GPU only for the duration of this call
def generate_video(image, seed):
    # Preprocess the image: SVD-XT expects a 1024x576 RGB input
    image = Image.open(image).convert("RGB")
    image = image.resize((1024, 576))
    # Seed the generator for reproducible results
    generator = torch.manual_seed(int(seed))
    # Generate the video frames (returned as a list of PIL images)
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]
    # Convert the PIL frames to uint8 numpy arrays for video export
    frames = [np.array(frame) for frame in frames]
    # Export the frames to a video file
    clip = ImageSequenceClip(frames, fps=7)
    output_video_path = "generated.mp4"
    clip.write_videofile(output_video_path, codec="libx264")
    return output_video_path
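
# Optional, not required for the app: the pipeline call also accepts
# micro-conditioning knobs. A variant of the call above (values shown
# are diffusers' defaults) might look like:
#
#   frames = pipeline(
#       image,
#       decode_chunk_size=8,
#       generator=generator,
#       motion_bucket_id=127,     # higher values produce more motion
#       noise_aug_strength=0.02,  # noise added to the conditioning image
#   ).frames[0]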

# Create the Gradio interface
iface = gr.Interface(
    fn=generate_video,
    inputs=[
        # type="filepath" hands the function a path string that Image.open can read
        gr.Image(type="filepath", label="Upload Image"),
        gr.Number(label="Seed", value=42, precision=0),
    ],
    outputs=gr.Video(label="Generated Video"),
    title="Stable Video Diffusion",
    description="Generate a video from an uploaded image using Stable Video Diffusion.",
)

# Launch the interface
iface.launch()
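
As an alternative to moviepy, diffusers ships an export_to_video helper that writes PIL or numpy frames straight to an mp4. A minimal sketch, assuming the same frames list produced inside generate_video:

    from diffusers.utils import export_to_video

    # Drop-in replacement for the ImageSequenceClip lines above; it accepts
    # the list of PIL frames directly, so the numpy conversion is unnecessary
    output_video_path = export_to_video(frames, "generated.mp4", fps=7)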