import gradio as gr
import torch
from diffusers import I2VGenXLPipeline
from diffusers.utils import export_to_gif, load_image
import tempfile
import spaces
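# Minimal Gradio demo for ali-vilab/i2vgen-xl: it generates a short GIF from a
# text prompt, optionally conditioned on an input image. The app targets
# Hugging Face ZeroGPU Spaces, where the @spaces.GPU decorator attaches a GPU
# for the duration of the decorated call.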
def initialize_pipeline():
    # Load the I2VGen-XL pipeline in fp16; the caller moves it to the GPU,
    # since it runs inside a @spaces.GPU context
    pipeline = I2VGenXLPipeline.from_pretrained(
        "ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16"
    )
    return pipeline
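# Note: from_pretrained() re-reads the weights from the local cache on every
# call; loading the pipeline once at module scope would make repeated
# generations faster, at the cost of keeping the model resident in memory.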
@spaces.GPU
def generate_gif(prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed):
    # Build the pipeline inside the GPU-decorated function and move it to CUDA
    pipeline = initialize_pipeline().to("cuda")
    # Seed the generator (gr.Number delivers a float, so cast to int)
    generator = torch.Generator().manual_seed(int(seed))
    # Use image-conditioned generation only when an input image was provided
    if image is not None and image != "":
        image = load_image(image).convert("RGB")
        frames = pipeline(
            prompt=prompt,
            image=image,
            num_inference_steps=num_inference_steps,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            generator=generator
        ).frames[0]
    else:
        frames = pipeline(
            prompt=prompt,
            num_inference_steps=num_inference_steps,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            generator=generator
        ).frames[0]
    # Export the frames to a GIF in a temporary file and return its path
    with tempfile.NamedTemporaryFile(delete=False, suffix=".gif") as tmp_gif:
        gif_path = tmp_gif.name
    export_to_gif(frames, gif_path)
    return gif_path
# Create the Gradio interface with tabs
with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.TabItem("Generate from Text"):
            with gr.Row():
                with gr.Column():
                    text_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
                    text_negative_prompt = gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt")
                    text_num_inference_steps = gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps")
                    text_guidance_scale = gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale")
                    text_seed = gr.Number(label="Seed", value=8888)
                    text_generate_button = gr.Button("Generate GIF")
                with gr.Column():
                    # gr.Image displays animated GIFs; gr.Video expects video codecs
                    text_output_gif = gr.Image(label="Generated GIF")
            # When generating from text, pass the image through a hidden State
            # holding None; Gradio inputs must be components, not literal values
            text_image_state = gr.State(value=None)
            text_generate_button.click(
                fn=generate_gif,
                inputs=[text_prompt, text_image_state, text_negative_prompt, text_num_inference_steps, text_guidance_scale, text_seed],
                outputs=text_output_gif
            )
        with gr.TabItem("Generate from Image"):
            with gr.Row():
                with gr.Column():
                    image_prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
                    image_input = gr.Image(type="filepath", label="Input Image")
                    image_negative_prompt = gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt")
                    image_num_inference_steps = gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps")
                    image_guidance_scale = gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale")
                    image_seed = gr.Number(label="Seed", value=8888)
                    image_generate_button = gr.Button("Generate GIF")
                with gr.Column():
                    image_output_gif = gr.Image(label="Generated GIF")
            image_generate_button.click(
                fn=generate_gif,
                inputs=[image_prompt, image_input, image_negative_prompt, image_num_inference_steps, image_guidance_scale, image_seed],
                outputs=image_output_gif
            )
# Launch the interface
demo.launch()