fffiloni committed (verified)
Commit 1c84a81 · Parent(s): 8959db0

Update app.py

Files changed (1):
  1. app.py (+5 -5)
app.py CHANGED
@@ -123,7 +123,7 @@ def preprocess_video_in(video_path):
         gr.update(open=False) # video_in_drawer
     ]
 
-def get_point(point_type, tracking_points, trackings_input_label, first_frame_path, evt: gr.SelectData):
+def get_point(point_type, tracking_points, trackings_input_label, input_first_frame_path, evt: gr.SelectData):
     print(f"You selected {evt.value} at {evt.index} from {evt.target}")
 
     tracking_points.value.append(evt.index)
@@ -301,7 +301,7 @@ def get_mask_sam_process(
 
     return "output_first_frame.jpg", frame_names, predictor, inference_state, gr.update(choices=available_frames_to_check, value=working_frame, visible=True)
 
-def propagate_to_all(video_in, checkpoint, stored_inference_state, stored_frame_names, video_frames_dir, vis_frame_type, available_frames_to_check, progress=gr.Progress(track_tqdm=True)):
+def propagate_to_all(video_in, checkpoint, stored_inference_state, stored_frame_names, video_frames_dir, vis_frame_type, available_frames_to_check, working_frame, progress=gr.Progress(track_tqdm=True)):
     #### PROPAGATION ####
     sam2_checkpoint, model_cfg = load_model(checkpoint)
     predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint)
@@ -356,7 +356,7 @@ def propagate_to_all(video_in, checkpoint, stored_inference_state, stored_frame_
     print(f"JPEG_IMAGES: {jpeg_images}")
 
     if vis_frame_type == "check":
-        return gr.update(value=jpeg_images), gr.update(value=None), gr.update(choices=available_frames_to_check, value=None, visible=True), available_frames_to_check
+        return gr.update(value=jpeg_images), gr.update(value=None), gr.update(choices=available_frames_to_check, value=working_frame, visible=True), available_frames_to_check
     elif vis_frame_type == "render":
         # Create a video clip from the image sequence
         original_fps = get_video_fps(video_in)
@@ -372,7 +372,7 @@ def propagate_to_all(video_in, checkpoint, stored_inference_state, stored_frame_
             codec='libx264'
         )
 
-        return gr.update(value=None), gr.update(value=final_vid_output_path), None, available_frames_to_check
+        return gr.update(value=None), gr.update(value=final_vid_output_path), working_frame, available_frames_to_check
 
 def update_ui(vis_frame_type):
     if vis_frame_type == "check":
@@ -556,7 +556,7 @@ with gr.Blocks() as demo:
         queue=False
     ).then(
         fn = propagate_to_all,
-        inputs = [video_in, checkpoint, stored_inference_state, stored_frame_names, video_frames_dir, vis_frame_type, available_frames_to_check],
+        inputs = [video_in, checkpoint, stored_inference_state, stored_frame_names, video_frames_dir, vis_frame_type, available_frames_to_check, working_frame],
         outputs = [output_propagated, output_video, working_frame, available_frames_to_check]
     )
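The change above follows a common Gradio wiring pattern: a component (working_frame) is listed in both the inputs and the outputs of the same event handler, so the handler can read the current selection and hand it straight back instead of clearing it, keeping the UI state in sync after propagation. Below is a minimal, self-contained sketch of that round-trip; the component names and the placeholder handler are illustrative stand-ins, not the Space's actual code.

import gradio as gr

# Minimal sketch (hypothetical names): a handler receives a dropdown's current
# value via `inputs` and returns it via `outputs`, so the selection survives
# the event. This is the same pattern the commit applies to working_frame.
def propagate(video_path, working_frame):
    result = f"propagated {video_path} starting from {working_frame}"  # placeholder work
    return result, working_frame  # echo working_frame back to keep the dropdown in sync

with gr.Blocks() as demo:
    video_path = gr.Textbox(label="video path")  # stand-in for video_in
    working_frame = gr.Dropdown(choices=["frame_0.jpg"], label="working frame")
    output = gr.Textbox(label="result")
    gr.Button("Propagate").click(
        fn=propagate,
        inputs=[video_path, working_frame],
        outputs=[output, working_frame],
    )

if __name__ == "__main__":
    demo.launch()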