Update app.py
app.py CHANGED
@@ -85,7 +85,7 @@ def preprocess_video_in(video_path):
     cap.release()
 
     # 'image' is the first frame extracted from video_in
-    return first_frame, gr.State([]), gr.State([]), first_frame, first_frame, output_dir, None, None
+    return first_frame, gr.State([]), gr.State([]), first_frame, first_frame, output_dir, None, None, gr.update(open=False)
 
 def get_point(point_type, tracking_points, trackings_input_label, first_frame_path, evt: gr.SelectData):
     print(f"You selected {evt.value} at {evt.index} from {evt.target}")
@@ -362,7 +362,7 @@ with gr.Blocks() as demo:
     with gr.Row():
 
         with gr.Column():
-
+
 
             with gr.Row():
                 point_type = gr.Radio(label="point type", choices=["include", "exclude"], value="include", scale=2)
@@ -379,6 +379,9 @@ with gr.Blocks() as demo:
             with gr.Row():
                 checkpoint = gr.Dropdown(label="Checkpoint", choices=["tiny", "small", "base-plus", "large"], value="tiny")
                 submit_btn = gr.Button("Submit", size="lg")
+
+            with gr.Accordion("Your video IN", open=True) as video_in_drawer:
+                video_in = gr.Video(label="Video IN")
 
         with gr.Column():
             output_result = gr.Image()
@@ -399,7 +402,7 @@ with gr.Blocks() as demo:
     video_in.upload(
         fn = preprocess_video_in,
         inputs = [video_in],
-        outputs = [first_frame_path, tracking_points, trackings_input_label, input_first_frame_image, points_map, video_frames_dir, stored_inference_state, stored_frame_names],
+        outputs = [first_frame_path, tracking_points, trackings_input_label, input_first_frame_image, points_map, video_frames_dir, stored_inference_state, stored_frame_names, video_in_drawer],
         queue = False
     )
 
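The pattern in this commit: preprocess_video_in returns an extra gr.update(open=False), and the accordion video_in_drawer is appended to the .upload() outputs, so the drawer holding the video input collapses once the uploaded video has been preprocessed. Below is a minimal runnable sketch of that mechanism only; the handler and the Textbox are illustrative stand-ins for the real preprocess_video_in and its other outputs, not the app's actual code.

# Sketch of the accordion-collapse pattern (assumed names, not the full app).
import gradio as gr

def on_video_upload(video_path):
    # The real handler extracts frames with OpenCV; here we just echo the
    # path and return an update that closes the accordion drawer.
    return video_path, gr.update(open=False)

with gr.Blocks() as demo:
    # The video input lives inside an accordion so it can be tucked away
    # after upload, freeing space for the rest of the UI.
    with gr.Accordion("Your video IN", open=True) as video_in_drawer:
        video_in = gr.Video(label="Video IN")
    first_frame_path = gr.Textbox(label="first frame path")

    video_in.upload(
        fn=on_video_upload,
        inputs=[video_in],
        # Listing the accordion itself as an output is what lets the
        # returned gr.update(open=False) be applied to it.
        outputs=[first_frame_path, video_in_drawer],
        queue=False,
    )

demo.launch()

Because the accordion is passed in outputs, the handler only has to return a component update to toggle its open state, which is why the commit needs exactly one extra return value in preprocess_video_in and one extra entry (video_in_drawer) in the outputs list.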