Muhammad Taqi Raza committed on
Commit
6979aa7
·
1 Parent(s): 46c9ce1
gradio_app.py CHANGED
@@ -232,7 +232,7 @@ def run_epic_inference(video_path, fps, num_frames, target_pose, mode):
232
  "--video_length", str(num_frames),
233
  "--save_name", "output",
234
  "--mode", mode,
235
- "--fps", fps
236
  ]
237
  try:
238
  result = subprocess.run(command, capture_output=True, text=True, check=True)
 
232
  "--video_length", str(num_frames),
233
  "--save_name", "output",
234
  "--mode", mode,
235
+ "--fps", str(fps)
236
  ]
237
  try:
238
  result = subprocess.run(command, capture_output=True, text=True, check=True)
inference/cli_demo_camera_i2v_pcd.py CHANGED
@@ -160,6 +160,7 @@ def generate_video(
160
  infer_with_mask: bool = False,
161
  pool_style: str = 'avg',
162
  pipe_cpu_offload: bool = False,
 
163
  ):
164
  """
165
  Generates a video based on the given prompt and saves it to the specified path.
@@ -216,6 +217,7 @@ def generate_video(
216
  downscale_coef=downscale_coef,
217
  in_channels=controlnet_input_channels,
218
  use_zero_conv=use_zero_conv,
 
219
  **controlnet_kwargs,
220
  )
221
  if controlnet_model_path:
@@ -348,8 +350,8 @@ def generate_video(
348
  video_generate = video_generate_all[0]
349
 
350
  # 6. Export the generated frames to a video file. fps must be 8 for original video.
351
- export_to_video(video_generate, output_path_file, fps=8)
352
- export_to_video(reference_frames, output_path_file_reference, fps=8)
353
  out_reference_frames = [
354
  stack_images_horizontally(frame_reference, frame_out)
355
  for frame_out, frame_reference in zip(video_generate, reference_frames)
@@ -360,7 +362,7 @@ def generate_video(
360
  stack_images_horizontally(frame_out, frame_reference)
361
  for frame_out, frame_reference in zip(out_reference_frames, anchor_video)
362
  ]
363
- export_to_video(out_reference_frames, output_path_file_out_reference, fps=8)
364
 
365
 
366
  if __name__ == "__main__":
@@ -415,6 +417,7 @@ if __name__ == "__main__":
415
  parser.add_argument("--controlnet_input_channels", type=int, default=6)
416
  parser.add_argument("--controlnet_transformer_num_layers", type=int, default=8)
417
  parser.add_argument("--enable_model_cpu_offload", action="store_true", default=False, help="Enable model CPU offload")
 
418
 
419
  args = parser.parse_args()
420
  dtype = torch.float16 if args.dtype == "float16" else torch.bfloat16
@@ -451,4 +454,5 @@ if __name__ == "__main__":
451
  infer_with_mask=args.infer_with_mask,
452
  pool_style=args.pool_style,
453
  pipe_cpu_offload=args.enable_model_cpu_offload,
 
454
  )
 
160
  infer_with_mask: bool = False,
161
  pool_style: str = 'avg',
162
  pipe_cpu_offload: bool = False,
163
+ fps: int = 8,
164
  ):
165
  """
166
  Generates a video based on the given prompt and saves it to the specified path.
 
217
  downscale_coef=downscale_coef,
218
  in_channels=controlnet_input_channels,
219
  use_zero_conv=use_zero_conv,
220
+ sample_frames = num_frames, # number of frames (e.g. 49)
221
  **controlnet_kwargs,
222
  )
223
  if controlnet_model_path:
 
350
  video_generate = video_generate_all[0]
351
 
352
  # 6. Export the generated frames to a video file (fps is now configurable; 8 matches the original video).
353
+ export_to_video(video_generate, output_path_file, fps=fps)
354
+ export_to_video(reference_frames, output_path_file_reference, fps=fps)
355
  out_reference_frames = [
356
  stack_images_horizontally(frame_reference, frame_out)
357
  for frame_out, frame_reference in zip(video_generate, reference_frames)
 
362
  stack_images_horizontally(frame_out, frame_reference)
363
  for frame_out, frame_reference in zip(out_reference_frames, anchor_video)
364
  ]
365
+ export_to_video(out_reference_frames, output_path_file_out_reference, fps=fps)
366
 
367
 
368
  if __name__ == "__main__":
 
417
  parser.add_argument("--controlnet_input_channels", type=int, default=6)
418
  parser.add_argument("--controlnet_transformer_num_layers", type=int, default=8)
419
  parser.add_argument("--enable_model_cpu_offload", action="store_true", default=False, help="Enable model CPU offload")
420
+ parser.add_argument("--fps", type=int, default=8, help="Frames per second for the output video")
421
 
422
  args = parser.parse_args()
423
  dtype = torch.float16 if args.dtype == "float16" else torch.bfloat16
 
454
  infer_with_mask=args.infer_with_mask,
455
  pool_style=args.pool_style,
456
  pipe_cpu_offload=args.enable_model_cpu_offload,
457
+ fps=args.fps,
458
  )