Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -162,16 +162,16 @@ pipe.to("cuda")
 # pipe.load_lora_weights("TODO/TODO", adapter_name="ltx-lora")
 # pipe.set_adapters(["lrx-lora"], adapter_weights=[1.0])

-pipe.fuse_qkv_projections()
+# pipe.fuse_qkv_projections()

 pipe.unet.to(memory_format=torch.channels_last)
 pipe.vae.to(memory_format=torch.channels_last)
 pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True)
 pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=True)

-@spaces.GPU(duration=120
+@spaces.GPU(duration=120)
 @torch.inference_mode()
-def generate_video(prompt, negative_prompt, height, width, num_frames, num_inference_steps, fps, seed):
+def generate_video(prompt, negative_prompt, height, width, num_frames, num_inference_steps, fps, seed, progress=gr.Progress(track_tqdm=True)):
     progress_steps = []

     def setup_progressbar_length(_num_steps=num_inference_steps):
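The first change disables pipe.fuse_qkv_projections(). The commit does not say why; a common defensive pattern (an assumption here, not something this repo does) is to attempt the fusion and fall back when the pipeline does not support it. The model id below is a hypothetical placeholder:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("some-org/some-model")  # hypothetical model id
try:
    pipe.fuse_qkv_projections()  # fuse the attention Q/K/V linear layers where supported
except AttributeError:
    pass  # some pipelines / older diffusers releases do not expose this method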
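For context, the unchanged lines apply a standard inference-speed recipe: channels-last memory format plus torch.compile with mode="max-autotune". A minimal sketch of the same recipe on a generic module (the module and shapes are illustrative, not from app.py):

import torch

model = torch.nn.Conv2d(3, 8, kernel_size=3).eval()
model = model.to(memory_format=torch.channels_last)  # NHWC layout favors conv-heavy nets
model = torch.compile(model, mode="max-autotune", fullgraph=True)

x = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)
with torch.inference_mode():
    y = model(x)  # first call triggers compilation; later calls reuse the compiled graph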
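The repaired decorator, @spaces.GPU(duration=120), is the Hugging Face ZeroGPU API (the Space header above says "Running on Zero"): the app holds no GPU at rest, and each decorated call is granted one for at most the stated number of seconds. The old line appears to be missing its closing parenthesis, which this commit fixes. A hedged sketch of the overall shape (model id and return value are placeholders):

import spaces
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("some-org/some-model", torch_dtype=torch.bfloat16)  # placeholder
pipe.to("cuda")  # with ZeroGPU, the CUDA work actually runs inside decorated calls

@spaces.GPU(duration=120)  # request a GPU slice for up to 120 seconds per call
@torch.inference_mode()
def generate(prompt: str):
    return pipe(prompt)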
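The substantive change is the new progress=gr.Progress(track_tqdm=True) parameter on generate_video. Gradio injects a progress tracker into any event handler that declares a gr.Progress default, and track_tqdm=True mirrors tqdm bars raised inside the call, such as the diffusers denoising loop, into the UI. A minimal runnable sketch, independent of app.py:

import time

import gradio as gr
from tqdm import tqdm

def slow_task(steps, progress=gr.Progress(track_tqdm=True)):
    # Any tqdm loop inside the handler is mirrored to the Gradio progress bar.
    for _ in tqdm(range(int(steps)), desc="denoising"):
        time.sleep(0.05)
    return "done"

demo = gr.Interface(slow_task, gr.Number(value=20), gr.Textbox())
demo.launch()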