Spaces:
Runtime error
Runtime error
File size: 1,482 Bytes
9d668ad 3039940 25ef180 aa5e404 0bb12d1 9d668ad c8ca6fe a4dc2bc 3039940 c8ca6fe 9d668ad aa5e404 2893544 a1cfbda 2893544 a1cfbda 2893544 25ef180 aa5e404 5612db5 c8ca6fe aa5e404 c8ca6fe 4215842 c8ca6fe a52084a dc4366f c8ca6fe 3039940 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 |
import spaces
import torch,os,imageio
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
from PIL import Image
import numpy as np
# Run on the GPU when CUDA is available; otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
def save_video(frames, save_path, fps, quality=9):
    """Encode a sequence of frames into a video file.

    Parameters
    ----------
    frames : iterable
        Frames in playback order; PIL images or array-likes (converted
        with ``np.array`` before writing).
    save_path : str
        Output file path; the extension selects the container (e.g. ".mp4").
    fps : int
        Playback frame rate.
    quality : int, optional
        imageio/ffmpeg quality setting (higher is better). Defaults to 9.
    """
    writer = imageio.get_writer(save_path, fps=fps, quality=quality)
    try:
        for frame in frames:
            # imageio expects array data, not PIL images — convert first.
            writer.append_data(np.array(frame))
    finally:
        # Always close so the container is finalized even if encoding
        # fails partway through (the original leaked the writer on error).
        writer.close()
# Function to generate the video
@spaces.GPU(duration=100)
def Video(image):
    """Generate a short video clip from a single image with SVD-XT 1.1.

    Parameters
    ----------
    image : numpy.ndarray
        Input image as an HxWxC uint8 array (e.g. from a Gradio Image
        component) — assumed RGB; TODO confirm against the caller.

    Returns
    -------
    str
        Path to the generated "generated.mp4" file.
    """
    pipeline = StableVideoDiffusionPipeline.from_pretrained(
        "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16
    )
    if device == "cuda":
        # Model-level offload keeps VRAM low without the severe slowdown of
        # sequential offload. Offload manages device placement itself, so we
        # must NOT call .to("cuda") first. (The original branch was inverted:
        # offload APIs require a CUDA accelerator and raise on a CPU-only box.)
        pipeline.enable_model_cpu_offload()
    else:
        # No accelerator: just place the pipeline on the CPU directly.
        pipeline.to(device)
    # SVD-XT is trained at 1024x576; resize the input to match.
    image = Image.fromarray(image).resize((1024, 576))
    # Fixed seed for reproducible generations.
    generator = torch.manual_seed(42)
    # decode_chunk_size trades peak decode memory for speed.
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]
    path = "generated.mp4"
    # Export the frames to a video file at the model's native 7 fps.
    save_video(frames, path, fps=7)
    return path