import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video

# Load the ModelScope text-to-video pipeline in half precision.
pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# Reduce GPU memory pressure: offload idle sub-models to CPU and decode the VAE in slices.
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()


@spaces.GPU(duration=250)
def generate(prompt, num_inference_steps=25, num_frames=200):
    # Run the pipeline and write the generated frames out as a video file.
    video_frames = pipe(
        prompt, num_inference_steps=num_inference_steps, num_frames=num_frames
    ).frames
    video_path = export_to_video(video_frames)
    return video_path


prompt = gr.Textbox(label="Enter prompt to generate a video")
num_inference_steps = gr.Slider(10, 50, value=25, label="Number of inference steps")

interface = gr.Interface(
    generate,
    inputs=[prompt, num_inference_steps],
    examples=[["Astronaut riding a horse", 25], ["Darth Vader surfing in waves", 20]],
    outputs="video",
    cache_examples=False,
    theme="soft",
)
interface.launch()