import gradio as gr
import numpy as np
import torch
from diffusers import I2VGenXLPipeline
from moviepy.editor import ImageSequenceClip


def generate_video(image, prompt, negative_prompt, video_length, progress=gr.Progress(track_tqdm=True)):
    # Fixed seed so repeated runs produce the same video
    generator = torch.manual_seed(8888)

    # Prefer Apple's MPS backend when available, otherwise fall back to the CPU
    device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
    print(f"Using device: {device}")

    # Load the pipeline in float32, since float16 is poorly supported on MPS/CPU
    pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float32)
    pipeline.to(device)  # Move the model to the selected device

    # Generate all frames in a single pipeline call so the clip is temporally
    # coherent. gr.Progress(track_tqdm=True) mirrors the pipeline's internal
    # tqdm progress bar in the Gradio UI. Note that I2VGen-XL is usually run
    # for short clips; very long requests may run out of memory on CPU/MPS.
    fps = 30  # frames per second of the output video
    num_frames = int(video_length) * fps
    result = pipeline(
        prompt=prompt,
        image=image,
        num_inference_steps=5,  # kept low for speed; increase for better quality
        negative_prompt=negative_prompt,
        guidance_scale=9.0,
        generator=generator,
        num_frames=num_frames,
    )
    # result.frames is a batch of videos; take the frames of the first video
    frames = [np.array(frame) for frame in result.frames[0]]

    # Write the frames to an MP4 file
    output_file = "output_video.mp4"
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile(output_file, codec="libx264", audio=False)
    return output_file


# Create Gradio Blocks
with gr.Blocks() as demo:
    gr.Markdown("# AI-Powered Video Generation")

    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload Image")  # delivered directly as a PIL image
        prompt_input = gr.Textbox(label="Enter the Prompt")
        negative_prompt_input = gr.Textbox(label="Enter the Negative Prompt")
        video_length_input = gr.Number(label="Video Length (seconds)", value=10, precision=0)

    generate_button = gr.Button("Generate Video")
    output_video = gr.Video(label="Output Video")

    # Define the button action
    generate_button.click(
        generate_video,
        inputs=[image_input, prompt_input, negative_prompt_input, video_length_input],
        outputs=output_video,
        show_progress="full",  # show a progress bar while the video is generated
    )

# Launch the Gradio app
demo.launch()
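# ---------------------------------------------------------------------------
# Optional variant, not part of the app above: on a machine with an NVIDIA GPU,
# a common diffusers setup for this model is to load the half-precision weights
# and enable model CPU offload. A minimal sketch, assuming a CUDA-capable
# PyTorch build and GPU memory to hold the offloaded submodules:
#
#     import torch
#     from diffusers import I2VGenXLPipeline
#
#     pipeline = I2VGenXLPipeline.from_pretrained(
#         "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"
#     )
#     pipeline.enable_model_cpu_offload()  # keeps idle submodules in CPU RAM
# ---------------------------------------------------------------------------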