"""Gradio Space: text-to-video generation with damo-vilab/text-to-video-ms-1.7b.

Loads the diffusers text-to-video pipeline once at import time (module-level,
as required for HF Spaces), then exposes a simple Interface with a prompt box
and two sliders controlling denoising steps and output frame count.
"""

import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
import cv2
import numpy as np

# Load the pipeline in fp16 and enable memory-saving features so the model
# fits on the Space's GPU: CPU offload swaps submodules in/out of VRAM, and
# VAE slicing decodes frames in chunks.
pipe = DiffusionPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()


@spaces.GPU(duration=250)
def generate(prompt, num_inference_steps, num_frames):
    """Generate a short video from a text prompt.

    Args:
        prompt: Text description of the desired video.
        num_inference_steps: Number of denoising steps (more = higher quality,
            slower).
        num_frames: Number of frames in the output video.

    Returns:
        Path to the exported video file (as produced by
        ``diffusers.utils.export_to_video``).
    """
    # BUG FIX: the pipeline's __call__ signature is
    # (prompt, height=None, width=None, num_frames=16, num_inference_steps=50, ...)
    # so these values MUST be passed by keyword — positionally they would be
    # misrouted into height/width. Cast to int because Gradio sliders may
    # deliver floats.
    video_frames = pipe(
        prompt,
        num_inference_steps=int(num_inference_steps),
        num_frames=int(num_frames),
    ).frames
    video_path = export_to_video(video_frames)
    return video_path


prompt = gr.Textbox(label="Enter prompt to generate a video")
num_inference_steps = gr.Slider(10, 50, value=25, step=1, label="Inference steps")
num_frames = gr.Slider(100, 500, value=200, step=1, label="Number of frames")

interface = gr.Interface(
    generate,
    inputs=[prompt, num_inference_steps, num_frames],
    examples=[
        ["Astronaut riding a horse", 24, 256],
        ["Darth vader surfing in waves", 32, 128],
    ],
    outputs="video",
    cache_examples=False,
    theme="soft",
).launch()