Spaces:
Runtime error
Runtime error
File size: 1,293 Bytes
5b8b126 81c5e3c 7bded4c 81c5e3c 3eb6ac8 81c5e3c 09bf68a 81c5e3c f4db27b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
#Imports
import gradio as gr
from diffusers import DiffusionPipeline
from diffusers.schedulers import DPMSolverMultistepScheduler
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
from base64 import b64encode
import torch
device = "cpu"  # Force CPU usage

# Load the pipeline once at module import so every request reuses it.
# BUG FIX: the original loaded fp16 weights (torch_dtype=torch.float16,
# variant="fp16") while forcing CPU execution — many CPU kernels do not
# support half precision, which crashes at inference time. Use float32 on
# CPU and only request fp16 when a CUDA device is available.
dtype = torch.float16 if device == "cuda" else torch.float32
if device == "cuda":
    pipe = DiffusionPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b", torch_dtype=dtype, variant="fp16"
    )
else:
    pipe = DiffusionPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b", torch_dtype=dtype
    )
# BUG FIX: the original computed `device` but never moved the model to it.
pipe = pipe.to(device)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# pipe.enable_model_cpu_offload()  # only useful when running on CUDA
pipe.enable_vae_slicing()  # decode the VAE in slices to lower peak memory
def Generate_video(prompt, video_duration_seconds):
    """Generate a short video from a text prompt.

    Args:
        prompt: Text description of the desired video clip.
        video_duration_seconds: Requested clip length in seconds; the model
            renders roughly 10 frames per requested second.

    Returns:
        Filesystem path of the exported video file.
    """
    # BUG FIX: gr.Number delivers a float, and the pipeline requires an
    # integer frame count — cast explicitly to avoid a runtime TypeError.
    num_frames = int(video_duration_seconds * 10)
    video_frames = pipe(
        prompt=prompt,
        negative_prompt="low quality",
        num_inference_steps=25,
        num_frames=num_frames,
    ).frames
    # export_to_video writes the frames to a temp .mp4 and returns its path.
    video_path = export_to_video(video_frames)
    return video_path
# Assemble the Gradio UI: a multi-line prompt box and a duration field go in,
# the generated video comes out.
prompt_box = gr.Textbox(lines=5, label="Prompt")
duration_box = gr.Number(label="Video Duration (seconds)", value=3)
video_out = gr.Video(label="Generated Video")

iface = gr.Interface(
    fn=Generate_video,
    inputs=[prompt_box, duration_box],
    outputs=video_out,
)

# Start the web server; debug=True surfaces tracebacks in the console.
iface.launch(debug=True)
|