Muhammad Taqi Raza committed on
Commit
49304f4
·
1 Parent(s): 78295a1
Files changed (2) hide show
  1. gradio_app.py +1 -1
  2. inference/v2v_data/demo.py +2 -1
gradio_app.py CHANGED
@@ -68,7 +68,7 @@ def run_epic_inference(video_path, caption, motion_type):
68
  "--traj_txt",
69
  traj_txt,
70
  "--save_name",
71
- f"amalfi-coast_traj_{traj_name}",
72
  "--mode",
73
  "gradual",
74
  "--out_dir",
 
68
  "--traj_txt",
69
  traj_txt,
70
  "--save_name",
71
+ f"temp_input",
72
  "--mode",
73
  "gradual",
74
  "--out_dir",
inference/v2v_data/demo.py CHANGED
@@ -130,7 +130,7 @@ class GetAnchorVideos:
130
  window_size=opts.window_size,
131
  overlap=opts.overlap,
132
  ).to(opts.device)
133
-
134
  frames = (
135
  torch.from_numpy(frames).permute(0, 3, 1, 2).to(opts.device) * 2.0 - 1.0
136
  ) # 49 576 1024 3 -> 49 3 576 1024, [-1,1]
@@ -216,6 +216,7 @@ class GetAnchorVideos:
216
  window_size=opts.window_size,
217
  overlap=opts.overlap,
218
  ).to(opts.device)
 
219
  frames = (
220
  torch.from_numpy(frames).permute(0, 3, 1, 2).to(opts.device) * 2.0 - 1.0
221
  ) # 49 576 1024 3 -> 49 3 576 1024, [-1,1]
 
130
  window_size=opts.window_size,
131
  overlap=opts.overlap,
132
  ).to(opts.device)
133
+
134
  frames = (
135
  torch.from_numpy(frames).permute(0, 3, 1, 2).to(opts.device) * 2.0 - 1.0
136
  ) # 49 576 1024 3 -> 49 3 576 1024, [-1,1]
 
216
  window_size=opts.window_size,
217
  overlap=opts.overlap,
218
  ).to(opts.device)
219
+
220
  frames = (
221
  torch.from_numpy(frames).permute(0, 3, 1, 2).to(opts.device) * 2.0 - 1.0
222
  ) # 49 576 1024 3 -> 49 3 576 1024, [-1,1]