Muhammad Taqi Raza
committed on
Commit
·
87f323a
1
Parent(s):
23f29c8
modifying requirements.txt
Browse files
inference/cli_demo_camera_i2v_pcd.py
CHANGED
@@ -368,13 +368,13 @@ def generate_video(
|
|
368 |
|
369 |
# ++++++++++++++++++++++++++++++++++++++
|
370 |
latents = video_generate_all # This is a latent
|
371 |
-
|
372 |
-
|
373 |
-
# latents = latents[0] # ✅ Unwrap the inner list of PIL images
|
374 |
|
375 |
-
|
376 |
-
|
377 |
-
|
|
|
378 |
|
379 |
print(f"Type of latents: {type(latents)}")
|
380 |
print(f"Length of latents: {len(latents)}")
|
@@ -406,8 +406,11 @@ def generate_video(
|
|
406 |
|
407 |
# Convert latents back to PIL images after processing
|
408 |
latents = latents.clamp(0, 1) # Clamp values to [0,1]
|
409 |
-
|
410 |
-
|
|
|
|
|
|
|
411 |
video_generate_all = latents
|
412 |
|
413 |
# ++++++++++++++++++++++++++++++++++++++
|
|
|
368 |
|
369 |
# ++++++++++++++++++++++++++++++++++++++
|
370 |
latents = video_generate_all # This is a latent
|
371 |
+
|
372 |
+
transform = T.ToTensor()
|
|
|
373 |
|
374 |
+
latents = [
|
375 |
+
torch.stack([transform(img) for img in sublist]) # [num_frames, C, H, W]
|
376 |
+
for sublist in latents
|
377 |
+
] # List of [T, C, H, W] tensors
|
378 |
|
379 |
print(f"Type of latents: {type(latents)}")
|
380 |
print(f"Length of latents: {len(latents)}")
|
|
|
406 |
|
407 |
# Convert latents back to PIL images after processing
|
408 |
latents = latents.clamp(0, 1) # Clamp values to [0,1]
|
409 |
+
to_pil = T.ToPILImage()
|
410 |
+
latents = [
|
411 |
+
[to_pil(frame.cpu()) for frame in video] # video: Tensor[T, C, H, W]
|
412 |
+
for video in latents
|
413 |
+
]
|
414 |
video_generate_all = latents
|
415 |
|
416 |
# ++++++++++++++++++++++++++++++++++++++
|