import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
import streamlit as st
# Note: the individual sub-models (UNet, text encoder, VAE) are not loaded
# separately; DiffusionPipeline.from_pretrained below pulls in all pipeline
# components at once. (diffusers has no TextEncoder class, and from_pretrained
# expects a model directory or Hub repo id, not a path to a .bin file.)

# Title and User Input
st.title("Text-to-Video with Streamlit")
prompt = st.text_input("Enter your text prompt:", "Spiderman is surfing")

# Button to trigger generation
if st.button("Generate Video"):

    # Ensure 'accelerate' is at version 0.17.0 or higher (required for
    # enable_model_cpu_offload). Compare parsed versions rather than raw
    # strings, since plain string comparison mis-orders versions like "0.9.0".
    import accelerate
    from packaging import version
    if version.parse(accelerate.__version__) < version.parse("0.17.0"):
        st.warning("Please upgrade 'accelerate' to version 0.17.0 or higher for CPU offloading.")
    else:
        with st.spinner("Generating video..."):
            # Load the full text-to-video pipeline in half precision.
            # Note: from_pretrained has no `device` argument; device placement
            # is handled by enable_model_cpu_offload below, which needs a CUDA
            # device plus accelerate >= 0.17.0.
            pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w",
                                                     torch_dtype=torch.float16,
                                                     variant="fp16")
            # Use the DPM-Solver++ multistep scheduler for faster sampling.
            pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
            # Offload idle sub-models to CPU to keep GPU memory usage low.
            pipe.enable_model_cpu_offload()

            # Run the diffusion process and export the frames to an .mp4 file;
            # export_to_video returns the path of the written file. (On recent
            # diffusers releases the pipeline output is batched, in which case
            # pipe(...).frames[0] is the list of frames to export.)
            video_frames = pipe(prompt, num_inference_steps=25).frames
            video_path = export_to_video(video_frames)

            # Display the video in the Streamlit app
            st.video(video_path)
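
# To launch the app (assuming this file is saved as app.py):
#   streamlit run app.py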