import gradio as gr
import os
import torch
from diffusers import AutoencoderKLCogVideoX, CogVideoXImageToVideoPipeline, CogVideoXTransformer3DModel
from diffusers.utils import export_to_video, load_image
from transformers import T5EncoderModel, T5Tokenizer
from datetime import datetime
import random
from huggingface_hub import hf_hub_download
# Ensure the 'checkpoints' directory exists for the LoRA weights downloaded below
os.makedirs("checkpoints", exist_ok=True)
hf_hub_download(
    repo_id="wenqsun/DimensionX",
    filename="orbit_left_lora_weights.safetensors",
    local_dir="checkpoints"
)
hf_hub_download(
    repo_id="wenqsun/DimensionX",
    filename="orbit_up_lora_weights.safetensors",
    local_dir="checkpoints"
)
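# Load the CogVideoX-5B image-to-video components in half precision (fp16)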
model_id = "THUDM/CogVideoX-5b-I2V"
transformer = CogVideoXTransformer3DModel.from_pretrained(model_id, subfolder="transformer", torch_dtype=torch.float16)
text_encoder = T5EncoderModel.from_pretrained(model_id, subfolder="text_encoder", torch_dtype=torch.float16)
vae = AutoencoderKLCogVideoX.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float16)
tokenizer = T5Tokenizer.from_pretrained(model_id, subfolder="tokenizer")
pipe = CogVideoXImageToVideoPipeline.from_pretrained(model_id, tokenizer=tokenizer, text_encoder=text_encoder, transformer=transformer, vae=vae, torch_dtype=torch.float16)
# The orbit LoRA weights are loaded per request inside infer(), based on the selected orbit type
def infer(image_path, prompt, orbit_type, progress=gr.Progress(track_tqdm=True)):
    # Pick the LoRA that matches the requested camera orbit
    lora_path = "checkpoints/"
    if orbit_type == "Left":
        weight_name = "orbit_left_lora_weights.safetensors"
        adapter_name = "orbit_left_lora_weights"
    else:  # "Up"
        weight_name = "orbit_up_lora_weights.safetensors"
        adapter_name = "orbit_up_lora_weights"
    lora_rank = 256
    pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=adapter_name)
    pipe.fuse_lora(lora_scale=1 / lora_rank)
    pipe.to("cuda")

    prompt = f"{prompt}. High quality, ultrarealistic detail and breathtaking movie-like camera shot."
    image = load_image(image_path)
    seed = random.randint(0, 2**8 - 1)  # random seed in [0, 255]
    video = pipe(
        image,
        prompt,
        num_inference_steps=50,
        guidance_scale=7.0,
        use_dynamic_cfg=True,
        generator=torch.Generator(device="cpu").manual_seed(seed)
    )

    # Undo the LoRA fusion so the next request starts from the base weights
    pipe.unfuse_lora()
    pipe.unload_lora_weights()

    # Write the generated frames to a timestamped MP4 file
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_path = f"output_{timestamp}.mp4"
    export_to_video(video.frames[0], output_path, fps=8)
    return output_path
with gr.Blocks() as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# DimensionX")
        gr.Markdown("### Create Any 3D and 4D Scenes from a Single Image with Controllable Video Diffusion")
        with gr.Row():
            with gr.Column():
                image_in = gr.Image(label="Image Input", type="filepath")
                prompt = gr.Textbox(label="Prompt")
                orbit_type = gr.Radio(label="Orbit type", choices=["Left", "Up"], value="Left")
                submit_btn = gr.Button("Submit")
            with gr.Column():
                video_out = gr.Video(label="Video output")
                examples = gr.Examples(
                    examples=[
                        [
                            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg",
                            "An astronaut hatching from an egg, on the surface of the moon, the darkness and depth of space realised in the background.",
                            "Left",
                            "./examples/output_astronaut_left.mp4"
                        ]
                    ],
                    inputs=[image_in, prompt, orbit_type, video_out]
                )

    submit_btn.click(
        fn=infer,
        inputs=[image_in, prompt, orbit_type],
        outputs=[video_out]
    )

demo.queue().launch(show_error=True, show_api=False)